Skip to content

Commit 2f7f79c

Browse files
committed
docs: update README examples to use const and let
1 parent fca7ac6 commit 2f7f79c

File tree

17 files changed

+1093
-1269
lines changed

17 files changed

+1093
-1269
lines changed

lib/node_modules/@stdlib/nlp/lda/README.md

Lines changed: 23 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -33,26 +33,23 @@ limitations under the License.
3333
## Usage
3434

3535
```javascript
36-
var lda = require( '@stdlib/nlp/lda' );
36+
const lda = require( '@stdlib/nlp/lda' );
3737
```
3838

3939
#### lda( docs, K\[, options] )
4040

4141
[Latent Dirichlet Allocation][lda] via collapsed Gibbs sampling. To create a model, call the `lda` function by passing it an `array` of `strings` and the number of topics `K` that should be identified.
4242

4343
```javascript
44-
var model;
45-
var docs;
46-
47-
docs = [
44+
const docs = [
4845
'I loved you first',
4946
'For one is both and both are one in love',
5047
'You never see my pain',
5148
'My love is such that rivers cannot quench',
5249
'See a lot of pain, a lot of tears'
5350
];
5451

55-
model = lda( docs, 2 );
52+
const model = lda( docs, 2 );
5653
// returns {}
5754
```
5855

@@ -77,7 +74,7 @@ Returns the `no` terms with the highest probabilities for chosen topic `k`.
7774
<!-- run-disable -->
7875

7976
```javascript
80-
var words = model.getTerms( 0, 3 );
77+
const words = model.getTerms( 0, 3 );
8178
/* returns
8279
[
8380
{ 'word': 'both', 'prob': 0.06315008476532499 },
@@ -98,62 +95,54 @@ var words = model.getTerms( 0, 3 );
9895
<!-- eslint no-undef: "error" -->
9996

10097
```javascript
101-
var sotu = require( '@stdlib/datasets/sotu' );
102-
var roundn = require( '@stdlib/math/base/special/roundn' );
103-
var stopwords = require( '@stdlib/datasets/stopwords-en' );
104-
var lowercase = require( '@stdlib/string/lowercase' );
105-
var lda = require( '@stdlib/nlp/lda' );
106-
107-
var speeches;
108-
var words;
109-
var terms;
110-
var model;
111-
var str;
112-
var i;
113-
var j;
114-
115-
words = stopwords();
116-
for ( i = 0; i < words.length; i++ ) {
98+
const sotu = require( '@stdlib/datasets/sotu' );
99+
const roundn = require( '@stdlib/math/base/special/roundn' );
100+
const stopwords = require( '@stdlib/datasets/stopwords-en' );
101+
const lowercase = require( '@stdlib/string/lowercase' );
102+
const lda = require( '@stdlib/nlp/lda' );
103+
104+
const words = stopwords();
105+
for ( let i = 0; i < words.length; i++ ) {
117106
words[ i ] = new RegExp( '\\b'+words[ i ]+'\\b', 'gi' );
118107
}
119108

120-
speeches = sotu({
109+
const speeches = sotu({
121110
'range': [ 1930, 2010 ]
122111
});
123-
for ( i = 0; i < speeches.length; i++ ) {
124-
str = lowercase( speeches[ i ].text );
125-
for ( j = 0; j < words.length; j++ ) {
112+
for ( let i = 0; i < speeches.length; i++ ) {
113+
let str = lowercase( speeches[ i ].text );
114+
for ( let j = 0; j < words.length; j++ ) {
126115
str = str.replace( words[ j ], '' );
127116
}
128117
speeches[ i ] = str;
129118
}
130119

131-
model = lda( speeches, 3 );
120+
const model = lda( speeches, 3 );
132121

133122
model.fit( 1000, 100, 10 );
134123

135-
for ( i = 0; i <= 80; i++ ) {
136-
str = 'Year: ' + (1930+i) + '\t';
124+
for ( let i = 0; i <= 80; i++ ) {
125+
let str = 'Year: ' + (1930+i) + '\t';
137126
str += 'Topic 1: ' + roundn( model.avgTheta.get( i, 0 ), -3 ) + '\t';
138127
str += 'Topic 2: ' + roundn( model.avgTheta.get( i, 1 ), -3 ) + '\t';
139128
str += 'Topic 3: ' + roundn( model.avgTheta.get( i, 2 ), -3 );
140129
console.log( str );
141130
}
142131

143-
terms = model.getTerms( 0, 20 );
144-
for ( i = 0; i < terms.length; i++ ) {
132+
let terms = model.getTerms( 0, 20 );
133+
for ( let i = 0; i < terms.length; i++ ) {
145134
terms[ i ] = terms[ i ].word;
146135
}
147136
console.log( 'Words most associated with first topic:\n ' + terms.join( ', ' ) );
148137

149138
terms = model.getTerms( 1, 20 );
150-
for ( i = 0; i < terms.length; i++ ) {
139+
for ( let i = 0; i < terms.length; i++ ) {
151140
terms[ i ] = terms[ i ].word;
152141
}
153142
console.log( 'Words most associated with second topic:\n ' + terms.join( ', ' ) );
154143

155144
terms = model.getTerms( 2, 20 );
156-
for ( i = 0; i < terms.length; i++ ) {
145+
for ( let i = 0; i < terms.length; i++ ) {
157146
terms[ i ] = terms[ i ].word;
158147
}
159148
console.log( 'Words most associated with third topic:\n ' + terms.join( ', ' ) );

0 commit comments

Comments (0)