@@ -333,6 +333,10 @@ using (var speechRecognizer = new SpeechRecognizer(
    speechConfig,
    audioConfig))
{
+     // (Optional) get the session ID
+     speechRecognizer.SessionStarted += (s, e) => {
+         Console.WriteLine($"SESSION ID: {e.SessionId}");
+     };
    pronunciationAssessmentConfig.ApplyTo(speechRecognizer);
    var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
@@ -355,7 +359,10 @@ Word, syllable, and phoneme results aren't available by using SDK objects with t
auto speechRecognizer = SpeechRecognizer::FromConfig(
    speechConfig,
    audioConfig);
-
+ // (Optional) get the session ID
+ speechRecognizer->SessionStarted.Connect([](const SessionEventArgs& e) {
+     std::cout << "SESSION ID: " << e.SessionId << std::endl;
+ });
pronunciationAssessmentConfig->ApplyTo(speechRecognizer);
speechRecognitionResult = speechRecognizer->RecognizeOnceAsync().get();
@@ -372,13 +379,17 @@ To learn how to specify the learning language for pronunciation assessment in yo
::: zone-end

::: zone pivot="programming-language-java"
+
For Android application development, the word, syllable, and phoneme results are available by using SDK objects with the Speech SDK for Java. The results are also available in the JSON string. For Java Runtime (JRE) application development, the word, syllable, and phoneme results are only available in the JSON string.

```Java
SpeechRecognizer speechRecognizer = new SpeechRecognizer(
    speechConfig,
    audioConfig);
-
+ // (Optional) get the session ID
+ speechRecognizer.sessionStarted.addEventListener((s, e) -> {
+     System.out.println("SESSION ID: " + e.getSessionId());
+ });
pronunciationAssessmentConfig.applyTo(speechRecognizer);
Future<SpeechRecognitionResult> future = speechRecognizer.recognizeOnceAsync();
SpeechRecognitionResult speechRecognitionResult = future.get(30, TimeUnit.SECONDS);
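For the JRE case described in the paragraph above, the word, syllable, and phoneme detail has to be read out of the JSON string rather than from SDK objects. A minimal sketch of doing that, assuming the `speechRecognitionResult` variable from the hunk above and reading the service response through the SDK's `SpeechServiceResponse_JsonResult` property (this snippet is not part of the commit):

```java
import com.microsoft.cognitiveservices.speech.PropertyId;

// Assumes speechRecognitionResult was obtained as in the snippet above.
// The service returns the full pronunciation assessment payload (words,
// syllables, phonemes) as a JSON string in the result's property bag.
String assessmentJson = speechRecognitionResult.getProperties().getProperty(
        PropertyId.SpeechServiceResponse_JsonResult);
System.out.println(assessmentJson);
```

On Android, the same detail is also exposed through SDK result objects, so parsing the JSON is only necessary for JRE applications.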
@@ -403,7 +414,10 @@ speechRecognitionResult.close();
```JavaScript
var speechRecognizer = SpeechSDK.SpeechRecognizer.FromConfig(speechConfig, audioConfig);
-
+ // (Optional) get the session ID
+ speechRecognizer.sessionStarted = (s, e) => {
+     console.log(`SESSION ID: ${e.sessionId}`);
+ };
pronunciationAssessmentConfig.applyTo(speechRecognizer);

speechRecognizer.recognizeOnceAsync((speechRecognitionResult: SpeechSDK.SpeechRecognitionResult) => {
@@ -426,10 +440,10 @@ To learn how to specify the learning language for pronunciation assessment in yo
speech_recognizer = speechsdk.SpeechRecognizer(
    speech_config=speech_config, \
    audio_config=audio_config)
-
+ # (Optional) get the session ID
+ speech_recognizer.session_started.connect(lambda evt: print(f"SESSION ID: {evt.session_id}"))
pronunciation_assessment_config.apply_to(speech_recognizer)
speech_recognition_result = speech_recognizer.recognize_once()
-
# The pronunciation assessment result as a Speech SDK object
pronunciation_assessment_result = speechsdk.PronunciationAssessmentResult(speech_recognition_result)
@@ -447,7 +461,10 @@ To learn how to specify the learning language for pronunciation assessment in yo
SPXSpeechRecognizer* speechRecognizer = \
    [[SPXSpeechRecognizer alloc] initWithSpeechConfiguration:speechConfig
                                 audioConfiguration:audioConfig];
-
+ // (Optional) get the session ID
+ [speechRecognizer addSessionStartedEventHandler: ^(SPXRecognizer *sender, SPXSessionEventArgs *eventArgs) {
+     NSLog(@"SESSION ID: %@", eventArgs.sessionId);
+ }];
[pronunciationAssessmentConfig applyToRecognizer:speechRecognizer];

SPXSpeechRecognitionResult *speechRecognitionResult = [speechRecognizer recognizeOnce];
@@ -467,7 +484,9 @@ To learn how to specify the learning language for pronunciation assessment in yo
```swift
let speechRecognizer = try! SPXSpeechRecognizer(speechConfiguration: speechConfig, audioConfiguration: audioConfig)
-
+ // (Optional) get the session ID
+ speechRecognizer.addSessionStartedEventHandler { (sender, evt) in
+     print("SESSION ID: \(evt.sessionId)")
+ }
try! pronConfig.apply(to: speechRecognizer)

let speechRecognitionResult = try? speechRecognizer.recognizeOnce()