Skip to content

Commit a0d7e48

Browse files
committed
lint fixes
1 parent d93a8e2 commit a0d7e48

File tree

5 files changed

+44
-32
lines changed

5 files changed

+44
-32
lines changed

examples/static/audio-video-deprecated/audio-video-app.js

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,18 @@
11
// recognizeElement example, now deprecated
22
// requires browserify
33

4+
'use strict';
5+
46
// global window.fetch polyfill for IE/Edge & Older Chrome/Firefox
57
require('whatwg-fetch');
68

79
// keep the bundle slim by only requiring the necessary modules
810
var recognizeElement = require('./recognize-element');
911

10-
document.querySelector('#button').onclick = function () {
12+
document.querySelector('#button').onclick = function() {
1113
fetch('/api/speech-to-text/token').then(function(response) {
1214
return response.text();
13-
}).then(function (token) {
15+
}).then(function(token) {
1416
var stream = recognizeElement({
1517
// muteSource: true, // prevents sound from also playing locally
1618
token: token,

examples/static/audio-video-deprecated/media-element-audio-stream.js

Lines changed: 22 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -30,10 +30,10 @@ function MediaElementAudioStream(element, options) {
3030
// https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createScriptProcessor
3131
// Possible values: null, 256, 512, 1024, 2048, 4096, 8192, 16384
3232
// however, webkitAudioContext (safari) requires it to be set
33-
bufferSize: (typeof AudioContext != "undefined" ? null : 4096),
33+
bufferSize: (window.AudioContext ? 4096 : null),
3434
muteSource: false,
3535
autoPlay: true,
36-
crossOrigin: "anonymous", // required for cross-domain audio playback
36+
crossOrigin: 'anonymous', // required for cross-domain audio playback
3737
objectMode: true // true = emit AudioBuffers w/ audio + some metadata, false = emit node.js Buffers (with binary data only)
3838
});
3939

@@ -68,14 +68,14 @@ function MediaElementAudioStream(element, options) {
6868
// onaudioprocess can be called at least once after we've stopped
6969
if (recording) {
7070
// todo: interleave channels in binary mode
71-
self.push( options.objectMode ? e.inputBuffer : new Buffer(e.inputBuffer.getChannelData(0)) );
71+
self.push(options.objectMode ? e.inputBuffer : new Buffer(e.inputBuffer.getChannelData(0)));
7272
}
7373
}
7474

7575
var AudioContext = window.AudioContext || window.webkitAudioContext;
7676
// cache the source node & context since it's not possible to recreate it later
7777
var context = element.context = element.context || new AudioContext();
78-
var audioInput = element.node = element.node || context.createMediaElementSource(element);
78+
var audioInput = element.node = element.node || context.createMediaElementSource(element);
7979
var scriptProcessor = context.createScriptProcessor(options.bufferSize, inputChannels, outputChannels);
8080

8181
scriptProcessor.onaudioprocess = processAudio;
@@ -96,41 +96,49 @@ function MediaElementAudioStream(element, options) {
9696
audioInput.connect(scriptProcessor);
9797
// other half of workaround for chrome bugs
9898
scriptProcessor.connect(context.destination);
99-
element.removeEventListener("playing", connect);
99+
element.removeEventListener('playing', connect);
100100
}
101-
element.addEventListener("playing", connect);
101+
element.addEventListener('playing', connect);
102102

103-
// https://developer.mozilla.org/en-US/docs/Web/Guide/Events/Media_events
104-
// https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/readyState
103+
/**
104+
* @see https://developer.mozilla.org/en-US/docs/Web/Guide/Events/Media_events
105+
* @see https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/readyState
106+
*/
105107
function start() {
106108
element.play();
107-
element.removeEventListener("canplaythrough", start);
109+
element.removeEventListener('canplaythrough', start);
108110
}
109111
if (options.autoPlay) {
110112
// play immediately if we have enough data, otherwise wait for the canplaythrough event
111-
if(element.readyState === element.HAVE_ENOUGH_DATA) {
113+
if (element.readyState === element.HAVE_ENOUGH_DATA) {
112114
element.play();
113115
} else {
114-
element.addEventListener("canplaythrough", start);
116+
element.addEventListener('canplaythrough', start);
115117
}
116118
}
117119

120+
/**
121+
* cleanup
122+
*/
118123
function end() {
119124
recording = false;
120125
scriptProcessor.disconnect();
121126
audioInput.disconnect();
122-
//context.close(); // this prevents us from re-using the same audio element until the page is refreshed
127+
// context.close(); // this prevents us from re-using the same audio element until the page is refreshed
123128
self.push(null);
124129
self.emit('close');
125130
}
126-
element.addEventListener("ended", end);
131+
element.addEventListener('ended', end);
127132

133+
/**
134+
* external API
135+
*/
128136
this.stop = function() {
129137
element.pause();
130138
end();
131139
};
132140

133-
element.addEventListener("error", this.emit.bind(this, 'error'));
141+
element.addEventListener('error', this.emit.bind(this, 'error'));
134142

135143
process.nextTick(function() {
136144
// this is more useful for binary mode than object mode, but it won't hurt either way

examples/static/audio-video-deprecated/recognize-element.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ var WritableElementStream = require('watson-speech/speech-to-text/writable-eleme
3737
*/
3838
module.exports = function recognizeElement(options) {
3939
if (!options || !options.token) {
40-
throw new Error("WatsonSpeechToText: missing required parameter: opts.token");
40+
throw new Error('WatsonSpeechToText: missing required parameter: opts.token');
4141
}
4242

4343
// the WritableElementStream works best in objectMode
@@ -73,7 +73,7 @@ module.exports = function recognizeElement(options) {
7373
recognizeStream.on('stop', sourceStream.stop.bind(sourceStream));
7474

7575
if (options.outputElement) {
76-
stream.pipe(new WritableElementStream(options))
76+
stream.pipe(new WritableElementStream(options));
7777
}
7878

7979
return stream;

examples/static/browserify-app.js

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,29 +1,31 @@
11
// Browserify bundling example
22

3+
'use strict';
4+
35
// global window.fetch polyfill for IE/Edge & Older Chrome/Firefox
46
require('whatwg-fetch');
57

68
// keep the bundle slim by only requiring the necessary modules
79
var recognizeMicrophone = require('watson-speech/speech-to-text/recognize-microphone');
810

9-
document.querySelector('#button').onclick = function () {
11+
document.querySelector('#button').onclick = function() {
1012

1113
fetch('/api/speech-to-text/token')
1214
.then(function(response) {
1315
return response.text();
14-
}).then(function (token) {
16+
}).then(function(token) {
1517

16-
var stream = recognizeMicrophone({
17-
token: token,
18-
continuous: false, // false = automatically stop transcription the first time a pause is detected
19-
outputElement: '#output' // CSS selector or DOM Element
20-
});
18+
var stream = recognizeMicrophone({
19+
token: token,
20+
continuous: false, // false = automatically stop transcription the first time a pause is detected
21+
outputElement: '#output' // CSS selector or DOM Element
22+
});
2123

22-
stream.on('error', function(err) {
23-
console.log(err);
24-
});
24+
stream.on('error', function(err) {
25+
console.log(err);
26+
});
2527

26-
}).catch(function(error) {
27-
console.log(error);
28-
});
28+
}).catch(function(error) {
29+
console.log(error);
30+
});
2931
};

speech-to-text/recognize-stream.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ RecognizeStream.prototype.initialize = function() {
176176
options['X-Watson-Learning-Opt-Out'] = options['X-WDC-PL-OPT-OUT'];
177177
}
178178

179-
var queryParams=util._extend('customization_id' in options ? pick(options, QUERY_PARAMS_ALLOWED):{model: 'en-US_BroadbandModel'}, pick(options, QUERY_PARAMS_ALLOWED));
179+
var queryParams = util._extend('customization_id' in options ? pick(options, QUERY_PARAMS_ALLOWED) : {model: 'en-US_BroadbandModel'}, pick(options, QUERY_PARAMS_ALLOWED));
180180

181181
var queryString = qs.stringify(queryParams);
182182
var url = (options.url || 'wss://stream.watsonplatform.net/speech-to-text/api').replace(/^http/, 'ws') + '/v1/recognize?' + queryString;

0 commit comments

Comments
 (0)