Fields
/// <summary>
/// The isolated storage file name under which the subscription key is persisted.
/// </summary>
private const string IsolatedStorageSubscriptionKeyFileName = "Subscription.txt";

/// <summary>
/// The default subscription key prompt message, used when no stored key is found.
/// </summary>
private const string DefaultSubscriptionKeyPromptMessage = "Secret key";

/// <summary>
/// Backing field for <see cref="SubscriptionKey"/>.
/// You can also put the primary key in app.config, instead of using UI:
/// string subscriptionKey = ConfigurationManager.AppSettings["primaryKey"];
/// </summary>
private string subscriptionKey = ConfigurationManager.AppSettings["primaryKey"];

/// <summary>
/// Gets or sets the subscription key; setting it raises PropertyChanged.
/// </summary>
public string SubscriptionKey
{
    get
    {
        return this.subscriptionKey;
    }

    set
    {
        this.subscriptionKey = value;
        this.OnPropertyChanged<string>();
    }
}

/// <summary>
/// The data recognition client (file/stream-based recognition).
/// </summary>
private DataRecognitionClient dataClient;

/// <summary>
/// The microphone recognition client.
/// </summary>
private MicrophoneRecognitionClient micClient;

#endregion Fields
#region event

/// <summary>
/// Implements the INotifyPropertyChanged interface.
/// </summary>
public event PropertyChangedEventHandler PropertyChanged;

/// <summary>
/// Helper function for the INotifyPropertyChanged interface.
/// Raises <see cref="PropertyChanged"/> for the calling property.
/// </summary>
/// <typeparam name="T">Property type.</typeparam>
/// <param name="caller">Property name; supplied by the compiler via CallerMemberName.</param>
private void OnPropertyChanged<T>([CallerMemberName] string caller = null)
{
    this.PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(caller));
}

#endregion event
#region Properties

/// <summary>
/// Gets the current speech recognition mode.
/// </summary>
/// <value>
/// LongDictation when either dictation flag is set; otherwise ShortPhrase.
/// </value>
private SpeechRecognitionMode Mode
{
    get
    {
        if (this.IsMicrophoneClientDictation || this.IsDataClientDictation)
        {
            return SpeechRecognitionMode.LongDictation;
        }

        return SpeechRecognitionMode.ShortPhrase;
    }
}

/// <summary>
/// Gets the default locale.
/// </summary>
/// <value>
/// The default locale.
/// </value>
private string DefaultLocale
{
    // get { return "en-US"; }
    get { return "zh-CN"; }
}

/// <summary>
/// Gets the Cognitive Service Authentication Uri.
/// </summary>
/// <value>
/// The Cognitive Service Authentication Uri. Empty if the global default is to be used.
/// </value>
private string AuthenticationUri
{
    get
    {
        return ConfigurationManager.AppSettings["AuthenticationUri"];
    }
}

/// <summary>
/// Gets a value indicating whether or not to use the microphone.
/// </summary>
/// <value>
/// <c>true</c> if any microphone-client mode flag is set; otherwise, <c>false</c>.
/// </value>
private bool UseMicrophone
{
    get
    {
        return this.IsMicrophoneClientWithIntent ||
               this.IsMicrophoneClientDictation ||
               this.IsMicrophoneClientShortPhrase;
    }
}

/// <summary>
/// Gets the short wave file path (read from app.config).
/// </summary>
/// <value>
/// The short wave file.
/// </value>
private string ShortWaveFile
{
    get
    {
        return ConfigurationManager.AppSettings["ShortWaveFile"];
    }
}

/// <summary>
/// Gets the long wave file path (read from app.config).
/// </summary>
/// <value>
/// The long wave file.
/// </value>
private string LongWaveFile
{
    get
    {
        return ConfigurationManager.AppSettings["LongWaveFile"];
    }
}

#endregion Properties
#region Mode selection flags

/// <summary>
/// Gets or sets a value indicating whether this instance is microphone client short phrase.
/// </summary>
/// <value>
/// <c>true</c> if this instance is microphone client short phrase; otherwise, <c>false</c>.
/// </value>
public bool IsMicrophoneClientShortPhrase { get; set; }

/// <summary>
/// Gets or sets a value indicating whether this instance is microphone client dictation.
/// </summary>
/// <value>
/// <c>true</c> if this instance is microphone client dictation; otherwise, <c>false</c>.
/// </value>
public bool IsMicrophoneClientDictation { get; set; }

/// <summary>
/// Gets or sets a value indicating whether this instance is microphone client with intent.
/// </summary>
/// <value>
/// <c>true</c> if this instance is microphone client with intent; otherwise, <c>false</c>.
/// </value>
public bool IsMicrophoneClientWithIntent { get; set; }

/// <summary>
/// Gets or sets a value indicating whether this instance is data client short phrase.
/// </summary>
/// <value>
/// <c>true</c> if this instance is data client short phrase; otherwise, <c>false</c>.
/// </value>
public bool IsDataClientShortPhrase { get; set; }

/// <summary>
/// Gets or sets a value indicating whether this instance is data client with intent.
/// </summary>
/// <value>
/// <c>true</c> if this instance is data client with intent; otherwise, <c>false</c>.
/// </value>
public bool IsDataClientWithIntent { get; set; }

/// <summary>
/// Gets or sets a value indicating whether this instance is data client dictation.
/// </summary>
/// <value>
/// <c>true</c> if this instance is data client dictation; otherwise, <c>false</c>.
/// </value>
public bool IsDataClientDictation { get; set; }

#endregion

#region Event handler callbacks
/// <summary>
/// Called when the microphone status has changed.
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="MicrophoneEventArgs"/> instance containing the event data.</param>
private void OnMicrophoneStatus(object sender, MicrophoneEventArgs e)
{
    // Logging is pushed onto a background task so the SDK callback returns quickly.
    Task task = new Task(() =>
    {
        Console.WriteLine("--- Microphone status change received by OnMicrophoneStatus() ---");
        Console.WriteLine("********* Microphone status: {0} *********", e.Recording);
        if (e.Recording)
        {
            Console.WriteLine("Please start speaking.");
        }

        Console.WriteLine();
    });
    task.Start();
}

/// <summary>
/// Called when a partial response is received.
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="PartialSpeechResponseEventArgs"/> instance containing the event data.</param>
private void OnPartialResponseReceivedHandler(object sender, PartialSpeechResponseEventArgs e)
{
    Console.WriteLine("--- Partial result received by OnPartialResponseReceivedHandler() ---");
    Console.WriteLine("{0}", e.PartialResult);
    Console.WriteLine();
}

/// <summary>
/// Called when an error is received.
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="SpeechErrorEventArgs"/> instance containing the event data.</param>
private void OnConversationErrorHandler(object sender, SpeechErrorEventArgs e)
{
    Console.WriteLine("--- Error received by OnConversationErrorHandler() ---");
    Console.WriteLine("Error code: {0}", e.SpeechErrorCode.ToString());
    Console.WriteLine("Error text: {0}", e.SpeechErrorText);
    Console.WriteLine();
}

/// <summary>
/// Called when a final response is received (microphone, short-phrase mode).
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
private void OnMicShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    Task task = new Task(() =>
    {
        Console.WriteLine("--- OnMicShortPhraseResponseReceivedHandler ---");

        // We got the final result, so we can end the mic recognition. No need to do this
        // for dataReco, since we already called EndAudio() on it as soon as we were done
        // sending all the data.
        this.micClient.EndMicAndRecognition();

        this.WriteResponseResult(e);
    });
    task.Start();
}

/// <summary>
/// Called when a final response is received (data client, short-phrase mode).
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
private void OnDataShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    Task task = new Task(() =>
    {
        Console.WriteLine("--- OnDataShortPhraseResponseReceivedHandler ---");

        // The data client already had EndAudio() called after sending, so only the
        // result needs to be written here.
        this.WriteResponseResult(e);
    });
    task.Start();
}

/// <summary>
/// Called when a final response is received (microphone, long-dictation mode).
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
private void OnMicDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    Console.WriteLine("--- OnMicDictationResponseReceivedHandler ---");
    if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
        e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
    {
        Task task = new Task(() =>
        {
            // Dictation has ended, so end the mic recognition on a background task
            // (EndMicAndRecognition must not be called from inside the SDK callback).
            this.micClient.EndMicAndRecognition();
        });
        task.Start();
    }

    this.WriteResponseResult(e);
}

/// <summary>
/// Called when a final response is received (data client, long-dictation mode).
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
private void OnDataDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    Console.WriteLine("--- OnDataDictationResponseReceivedHandler ---");
    if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
        e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
    {
        // NOTE(review): the task body is intentionally empty in the original sample —
        // nothing needs to be done for the data client, since EndAudio() was already
        // called after all data was sent. Kept for parity with the mic handler.
        Task task = new Task(() =>
        {
        });
        task.Start();
    }

    this.WriteResponseResult(e);
}

/// <summary>
/// Streams a wave file to the data recognition client in 1 KB chunks.
/// </summary>
/// <param name="wavFileName">Name of the wav file.</param>
private void SendAudioHelper(string wavFileName)
{
    using (FileStream fileStream = new FileStream(wavFileName, FileMode.Open, FileAccess.Read))
    {
        // Note for wave files, we can just send data from the file right to the server.
        // In the case you are not using an audio file in wave format, and instead you have
        // just raw data (for example audio coming over bluetooth), then before sending up
        // any audio data, you must first send up a SpeechAudioFormat descriptor to describe
        // the layout and format of your raw audio data via DataRecognitionClient's
        // sendAudioFormat() method.
        int bytesRead = 0;
        byte[] buffer = new byte[1024];

        try
        {
            do
            {
                // Get more audio data to send into the byte buffer.
                bytesRead = fileStream.Read(buffer, 0, buffer.Length);

                // Send audio data to the service.
                this.dataClient.SendAudio(buffer, bytesRead);
            }
            while (bytesRead > 0);
        }
        finally
        {
            // We are done sending audio. Final recognition results will arrive in the
            // OnResponseReceived event call.
            this.dataClient.EndAudio();
        }
    }
}
#endregion
#region Helper methods

/// <summary>
/// Gets the subscription key from isolated storage, falling back to the
/// prompt-message placeholder when no key has been stored yet.
/// </summary>
/// <returns>The subscription key.</returns>
private string GetSubscriptionKeyFromIsolatedStorage()
{
    string subscriptionKey = null;

    using (IsolatedStorageFile isoStore = IsolatedStorageFile.GetStore(IsolatedStorageScope.User | IsolatedStorageScope.Assembly, null, null))
    {
        try
        {
            using (var iStream = new IsolatedStorageFileStream(IsolatedStorageSubscriptionKeyFileName, FileMode.Open, isoStore))
            {
                using (var reader = new StreamReader(iStream))
                {
                    subscriptionKey = reader.ReadLine();
                }
            }
        }
        catch (FileNotFoundException)
        {
            // No key has been saved yet; fall through to the default prompt below.
            subscriptionKey = null;
        }
    }

    if (string.IsNullOrEmpty(subscriptionKey))
    {
        subscriptionKey = DefaultSubscriptionKeyPromptMessage;
    }

    return subscriptionKey;
}

/// <summary>
/// Creates a new microphone reco client without LUIS intent support.
/// </summary>
private void CreateMicrophoneRecoClient()
{
    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);

    this.micClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition results.
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.micClient.OnResponseReceived += this.OnMicShortPhraseResponseReceivedHandler;
    }
    else if (this.Mode == SpeechRecognitionMode.LongDictation)
    {
        this.micClient.OnResponseReceived += this.OnMicDictationResponseReceivedHandler;
    }

    this.micClient.OnConversationError += this.OnConversationErrorHandler;
}

/// <summary>
/// Creates a data client without LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClient()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);
    this.dataClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition results.
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    }
    else
    {
        this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    }

    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}

/// <summary>
/// Writes the n-best recognition results to the console.
/// </summary>
/// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
private void WriteResponseResult(SpeechResponseEventArgs e)
{
    if (e.PhraseResponse.Results.Length == 0)
    {
        Console.WriteLine("No phrase response is available.");
    }
    else
    {
        Console.WriteLine("********* Final n-BEST Results *********");
        for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
        {
            Console.WriteLine(
                "[{0}] Confidence={1}, Text=\"{2}\"",
                i,
                e.PhraseResponse.Results[i].Confidence,
                e.PhraseResponse.Results[i].DisplayText);

            // If the recognized text is the Chinese command "关闭。" ("Close."),
            // acknowledge the shutdown command on the console.
            if (e.PhraseResponse.Results[i].DisplayText == "关闭。")
            {
                Console.WriteLine("收到命令,马上关闭");
            }
        }

        Console.WriteLine();
    }
}
#endregion
#region Init

/// <summary>
/// Initializes a new instance: defaults to microphone short-phrase mode and
/// loads the subscription key from isolated storage.
/// </summary>
public SpeechConfig()
{
    this.IsMicrophoneClientShortPhrase = true;
    this.IsMicrophoneClientWithIntent = false;
    this.IsMicrophoneClientDictation = false;
    this.IsDataClientShortPhrase = false;
    this.IsDataClientWithIntent = false;
    this.IsDataClientDictation = false;

    this.SubscriptionKey = this.GetSubscriptionKeyFromIsolatedStorage();
}

/// <summary>
/// Starts speech recognition, lazily creating the microphone or data client
/// depending on the selected mode flags.
/// </summary>
public void SpeechRecognize()
{
    if (this.UseMicrophone)
    {
        if (this.micClient == null)
        {
            this.CreateMicrophoneRecoClient();
        }

        this.micClient.StartMicAndRecognition();
    }
    else
    {
        if (null == this.dataClient)
        {
            this.CreateDataRecoClient();
        }

        this.SendAudioHelper(
            (this.Mode == SpeechRecognitionMode.ShortPhrase) ? this.ShortWaveFile : this.LongWaveFile);
    }
}
#endregion Init
}
// Source: http://www.cnblogs.com/CKExp/p/7400969.html