/native/external/espeak/src/synthesize.cpp

http://eyes-free.googlecode.com/ · C++ · 1628 lines · 1247 code · 287 blank · 94 comment · 296 complexity · 9f3a10693542d29d1e0f1ec7b3f9fc32 MD5 · raw file

  1. /***************************************************************************
  2. * Copyright (C) 2005 to 2007 by Jonathan Duddington *
  3. * email: jonsd@users.sourceforge.net *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 3 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, see: *
  17. * <http://www.gnu.org/licenses/>. *
  18. ***************************************************************************/
  19. #include "StdAfx.h"
  20. #include <stdio.h>
  21. #include <ctype.h>
  22. #include <wctype.h>
  23. #include <stdlib.h>
  24. #include <string.h>
  25. #include <math.h>
  26. #include "speak_lib.h"
  27. #include "speech.h"
  28. #include "phoneme.h"
  29. #include "synthesize.h"
  30. #include "voice.h"
  31. #include "translate.h"
  32. extern FILE *f_log;
  33. static void SmoothSpect(void);
  34. // list of phonemes in a clause
  35. int n_phoneme_list=0;
  36. PHONEME_LIST phoneme_list[N_PHONEME_LIST];
  37. char mbrola_name[20];
  38. int speed_factor1;
  39. int speed_factor2;
  40. int speed_min_sample_len;
  41. static int last_pitch_cmd;
  42. static int last_amp_cmd;
  43. static frame_t *last_frame;
  44. static int last_wcmdq;
  45. static int pitch_length;
  46. static int amp_length;
  47. static int modn_flags;
  48. static int syllable_start;
  49. static int syllable_end;
  50. static int syllable_centre;
  51. static voice_t *new_voice=NULL;
  52. int n_soundicon_tab=N_SOUNDICON_SLOTS;
  53. SOUND_ICON soundicon_tab[N_SOUNDICON_TAB];
  54. #define RMS_GLOTTAL1 35 // vowel before glottal stop
  55. #define RMS_START 28 // 14 - 30
  56. #define VOWEL_FRONT_LENGTH 50
  57. // a dummy phoneme_list entry which looks like a pause
  58. static PHONEME_LIST next_pause;
  59. const char *WordToString(unsigned int word)
  60. {//========================================
  61. // Convert a phoneme mnemonic word into a string
  62. int ix;
  63. static char buf[5];
  64. for(ix=0; ix<3; ix++)
  65. buf[ix] = word >> (ix*8);
  66. buf[4] = 0;
  67. return(buf);
  68. }
  69. void SynthesizeInit()
  70. {//==================
  71. last_pitch_cmd = 0;
  72. last_amp_cmd = 0;
  73. last_frame = NULL;
  74. syllable_centre = -1;
  75. // initialise next_pause, a dummy phoneme_list entry
  76. // next_pause.ph = phoneme_tab[phonPAUSE]; // this must be done after voice selection
  77. next_pause.type = phPAUSE;
  78. next_pause.newword = 0;
  79. }
  80. static void EndAmplitude(void)
  81. {//===========================
  82. if(amp_length > 0)
  83. {
  84. if(wcmdq[last_amp_cmd][1] == 0)
  85. wcmdq[last_amp_cmd][1] = amp_length;
  86. amp_length = 0;
  87. }
  88. }
  89. static void EndPitch(int voice_break)
  90. {//==================================
  91. // posssible end of pitch envelope, fill in the length
  92. if((pitch_length > 0) && (last_pitch_cmd >= 0))
  93. {
  94. if(wcmdq[last_pitch_cmd][1] == 0)
  95. wcmdq[last_pitch_cmd][1] = pitch_length;
  96. pitch_length = 0;
  97. }
  98. if(voice_break)
  99. {
  100. last_wcmdq = -1;
  101. last_frame = NULL;
  102. syllable_end = wcmdq_tail;
  103. SmoothSpect();
  104. syllable_centre = -1;
  105. memset(vowel_transition,0,sizeof(vowel_transition));
  106. }
  107. } // end of Synthesize::EndPitch
  108. static void DoAmplitude(int amp, unsigned char *amp_env)
  109. {//=====================================================
  110. long *q;
  111. last_amp_cmd = wcmdq_tail;
  112. amp_length = 0; // total length of vowel with this amplitude envelope
  113. q = wcmdq[wcmdq_tail];
  114. q[0] = WCMD_AMPLITUDE;
  115. q[1] = 0; // fill in later from amp_length
  116. q[2] = (long)amp_env;
  117. q[3] = amp;
  118. WcmdqInc();
  119. } // end of Synthesize::DoAmplitude
  120. static void DoPitch(unsigned char *env, int pitch1, int pitch2)
  121. {//============================================================
  122. long *q;
  123. EndPitch(0);
  124. if(pitch1 == 1024)
  125. {
  126. // pitch was not set
  127. pitch1 = 24;
  128. pitch2 = 33;
  129. env = envelope_data[PITCHfall];
  130. }
  131. last_pitch_cmd = wcmdq_tail;
  132. pitch_length = 0; // total length of spect with this pitch envelope
  133. if(pitch2 < 0)
  134. pitch2 = 0;
  135. q = wcmdq[wcmdq_tail];
  136. q[0] = WCMD_PITCH;
  137. q[1] = 0; // length, fill in later from pitch_length
  138. q[2] = (long)env;
  139. q[3] = (pitch1 << 16) + pitch2;
  140. WcmdqInc();
  141. } // end of Synthesize::DoPitch
  142. int PauseLength(int pause, int control)
  143. {//====================================
  144. int len;
  145. if(control == 0)
  146. len = (pause * speed_factor1)/256;
  147. else
  148. len = (pause * speed_factor2)/256;
  149. if(len < 5) len = 5; // mS, limit the amount to which pauses can be shortened
  150. return(len);
  151. }
  152. static void DoPause(int length, int control)
  153. {//=========================================
  154. // control = 1, less shortening at fast speeds
  155. int len;
  156. len = PauseLength(length, control);
  157. len = (len * samplerate) / 1000; // convert from mS to number of samples
  158. EndPitch(1);
  159. wcmdq[wcmdq_tail][0] = WCMD_PAUSE;
  160. wcmdq[wcmdq_tail][1] = len;
  161. WcmdqInc();
  162. last_frame = NULL;
  163. } // end of Synthesize::DoPause
  164. extern int seq_len_adjust; // temporary fix to advance the start point for playing the wav sample
static int DoSample2(int index, int which, int length_mod, int amp)
{//================================================================
// Queue a command to play a section of wavefile data.
// index: offset into wavefile_data (bit 23 is a flag, masked off here);
//        a 4-byte header at that offset holds the data length in bytes
//        (little-endian in p[0],p[1]) and the sample format in p[2]
//        (format 0 is 2-byte samples, judging by the length/2 below).
// which: bit 0x100 set = mix with the synthesised wave (WCMD_WAVE2).
// length_mod: length modification, 256 = 100%; <= 0 plays the full length.
// amp: amplitude; amp < 0 computes the length but queues nothing.
// Returns the length to be played, in samples.
	int length;
	int length1;
	int format;
	int min_length;
	int start=0;
	long *q;
	unsigned char *p;

	index = index & 0x7fffff;
	p = &wavefile_data[index];
	format = p[2];
	length1 = (p[1] * 256);
	length1 += p[0];    //  length in bytes

	if(seq_len_adjust > 0)
	{
		// advance the start point of the sample (see the seq_len_adjust
		// extern above: temporary fix)
		start = (seq_len_adjust * samplerate)/1000;
		if(format == 0)
			start *= 2;        // 2-byte samples: convert samples to bytes
		length1 -= start;
		index += start;
	}

	if(length_mod > 0)
		length = (length1 * length_mod) / 256;
	else
		length = length1;

	length = (length * speed_factor2)/256;   // apply the speed factor
	min_length = speed_min_sample_len;
	if(format==0)
		min_length *= 2;
	if(length < min_length)
		length = min_length;
	if(length > length1)
		length = length1;   // don't exceed wavefile length

	if(format==0)
		length /= 2;   // 2 byte samples

	index += 4;   // skip the 4-byte length/format header

	if(amp >= 0)
	{
		last_wcmdq = wcmdq_tail;
		q = wcmdq[wcmdq_tail];
		if(which & 0x100)
			q[0] = WCMD_WAVE2;   // mix this with synthesised wave
		else
			q[0] = WCMD_WAVE;
		q[1] = length;   // length in samples
		q[2] = long(&wavefile_data[index]);
		q[3] = format + (amp << 8);
		WcmdqInc();
	}
	return(length);
}  // end of Synthesize::DoSample2
  217. int DoSample(PHONEME_TAB *ph1, PHONEME_TAB *ph2, int which, int length_mod, int amp)
  218. {//====================== ==========================================================
  219. int index;
  220. int match_level;
  221. int amp2;
  222. int result;
  223. EndPitch(1);
  224. index = LookupSound(ph1,ph2,which & 0xff,&match_level,0);
  225. if((index & 0x800000) == 0)
  226. return(0); // not wavefile data
  227. amp2 = wavefile_amp;
  228. if(amp != 0)
  229. amp2 = (amp * wavefile_amp)/20;
  230. if(amp == -1)
  231. amp2 = amp;
  232. result = DoSample2(index,which,length_mod,amp2);
  233. last_frame = NULL;
  234. return(result);
  235. } // end of Synthesize::DoSample
  236. static frame_t *AllocFrame()
  237. {//=========================
  238. // Allocate a temporary spectrum frame for the wavegen queue. Use a pool which is big
  239. // enough to use a round-robin without checks.
  240. // Only needed for modifying spectra for blending to consonants
  241. #define N_FRAME_POOL N_WCMDQ
  242. static int ix=0;
  243. static frame_t frame_pool[N_FRAME_POOL];
  244. ix++;
  245. if(ix >= N_FRAME_POOL)
  246. ix = 0;
  247. return(&frame_pool[ix]);
  248. }
static void set_frame_rms(frame_t *fr, int new_rms)
{//=================================================
// Each frame includes its RMS amplitude value, so to set a new
// RMS just adjust the formant amplitudes by the appropriate ratio.
	int x;
	int h;
	int ix;

	// sqrt_tab[i] ~= sqrt(i/64) * 0x200, for i = 0..199
	// (e.g. sqrt_tab[64] == 512 == sqrt(1.0) * 0x200)
	static const short sqrt_tab[200] = {
	  0, 64, 90,110,128,143,156,169,181,192,202,212,221,230,239,247,
	256,263,271,278,286,293,300,306,313,320,326,332,338,344,350,356,
	362,367,373,378,384,389,394,399,404,409,414,419,424,429,434,438,
	443,448,452,457,461,465,470,474,478,483,487,491,495,499,503,507,
	512,515,519,523,527,531,535,539,543,546,550,554,557,561,565,568,
	572,576,579,583,586,590,593,596,600,603,607,610,613,617,620,623,
	627,630,633,636,640,643,646,649,652,655,658,662,665,668,671,674,
	677,680,683,686,689,692,695,698,701,704,706,709,712,715,718,721,
	724,726,729,732,735,738,740,743,746,749,751,754,757,759,762,765,
	768,770,773,775,778,781,783,786,789,791,794,796,799,801,804,807,
	809,812,814,817,819,822,824,827,829,832,834,836,839,841,844,846,
	849,851,853,856,858,861,863,865,868,870,872,875,877,879,882,884,
	886,889,891,893,896,898,900,902};

	if(fr->rms == 0) return;    // check for divide by zero
	x = (new_rms * 64)/fr->rms;
	if(x >= 200) x = 199;       // clamp the ratio to the table's range
	x = sqrt_tab[x];   // sqrt(new_rms/fr->rms)*0x200;

	// scale every formant peak height by the square-root ratio
	for(ix=0; ix<N_PEAKS; ix++)
	{
		h = fr->fheight[ix] * x;
		fr->fheight[ix] = h/0x200;
	}
}   /* end of set_frame_rms */
  280. static void formants_reduce_hf(frame_t *fr, int level)
  281. {//====================================================
  282. // change height of peaks 2 to 8, percentage
  283. int ix;
  284. int x;
  285. for(ix=2; ix<N_PEAKS; ix++)
  286. {
  287. x = fr->fheight[ix] * level;
  288. fr->fheight[ix] = x/100;
  289. }
  290. }
  291. static frame_t *CopyFrame(frame_t *frame1, int copy)
  292. {//=================================================
  293. // create a copy of the specified frame in temporary buffer
  294. frame_t *frame2;
  295. if((copy==0) && (frame1->frflags & FRFLAG_COPIED))
  296. {
  297. // this frame has already been copied in temporary rw memory
  298. return(frame1);
  299. }
  300. frame2 = AllocFrame();
  301. if(frame2 != NULL)
  302. {
  303. memcpy(frame2,frame1,sizeof(frame_t));
  304. frame2->length = 0;
  305. frame2->frflags |= FRFLAG_COPIED;
  306. }
  307. return(frame2);
  308. }
  309. static frame_t *DuplicateLastFrame(frameref_t *seq, int n_frames, int length)
  310. {//==========================================================================
  311. frame_t *fr;
  312. seq[n_frames-1].length = length;
  313. fr = CopyFrame(seq[n_frames-1].frame,1);
  314. seq[n_frames].frame = fr;
  315. seq[n_frames].length = 0;
  316. return fr;
  317. }
static void AdjustFormants(frame_t *fr, int target, int min, int max, int f1_adj, int f3_adj, int hf_reduce, int flags)
{//====================================================================================================================
// Shift the frame's formants towards a consonant transition.
// target: F2 target frequency (scaled by the voice's formant_factor).
// min/max: limits on the F2 change.
// f1_adj: 1, 2 or 3 selects progressively stronger F1 (and F0) lowering.
// f3_adj: change applied to F3, and to F4/F5 (reversed if flags & 0x20).
// hf_reduce: percentage applied to the higher formant peak heights.
	int x;

//hf_reduce = 70;      // ?? using fixed amount rather than the parameter??

	target = (target * voice->formant_factor)/256;

	// move F2 half-way towards the target, limited to [min,max]
	x = (target - fr->ffreq[2]) / 2;
	if(x > max) x = max;
	if(x < min) x = min;
	fr->ffreq[2] += x;

	fr->ffreq[3] += f3_adj;
	if(flags & 0x20)
	{
		f3_adj = -f3_adj;    //. reverse direction for f4,f5 change
	}
	fr->ffreq[4] += f3_adj;
	fr->ffreq[5] += f3_adj;

	if(f1_adj==1)
	{
		// lower F1 towards 235Hz, by between 60 and 100
		x = (235 - fr->ffreq[1]);
		if(x < -100) x = -100;
		if(x > -60) x = -60;
		fr->ffreq[1] += x;
	}
	if(f1_adj==2)
	{
		// stronger: lower F1 (and F0) by between 150 and 300
		x = (235 - fr->ffreq[1]);
		if(x < -300) x = -300;
		if(x > -150) x = -150;
		fr->ffreq[1] += x;
		fr->ffreq[0] += x;
	}
	if(f1_adj==3)
	{
		x = (100 - fr->ffreq[1]);
		if(x < -400) x = -400;
		// NOTE(review): asymmetric clamp — any change weaker than -300 is
		// forced all the way to -400, whereas the parallel branches above
		// clamp to the limit itself. Verify against upstream eSpeak whether
		// this is intended or a typo for 'x = -300'.
		if(x > -300) x = -400;
		fr->ffreq[1] += x;
		fr->ffreq[0] += x;
	}
	formants_reduce_hf(fr,hf_reduce);
}
  359. int VowelCloseness(frame_t *fr)
  360. {//============================
  361. // return a value 0-3 depending on the vowel's f1
  362. int f1;
  363. if((f1 = fr->ffreq[1]) < 300)
  364. return(3);
  365. if(f1 < 400)
  366. return(2);
  367. if(f1 < 500)
  368. return(1);
  369. return(0);
  370. }
int FormantTransition2(frameref_t *seq, int &n_frames, unsigned int data1, unsigned int data2, PHONEME_TAB *other_ph, int which)
{//==============================================================================================================================
// Modify a vowel's frame sequence to blend towards the adjacent phoneme.
// which: 1 = entry to the vowel, 2 = exit from the vowel.
// data1: bits 0-5 length/2, bits 6-11 rms, bits 12+ flags.
// data2: packed formant parameters — f2 target and its min/max limits,
//        f3 adjustment and amplitude, an f1 class, and (top 3 bits) a
//        vowel-colouring index.
// n_frames may be incremented (a frame is appended on vowel exit).
// Returns len if flag bit 0x10 is set, otherwise 0.
	int ix;
	int formant;
	int next_rms;

	int len;
	int rms;
	int f1;
	int f2;
	int f2_min;
	int f2_max;
	int f3_adj;
	int f3_amp;
	int flags;
	int vcolour;

#define N_VCOLOUR 2
// percentage change for each formant in 256ths
	static short vcolouring[N_VCOLOUR][5] = {
		{243,272,256,256,256},    // palatal consonant follows
		{256,256,240,240,240},    // retroflex
	};

	frame_t *fr = NULL;

	if(n_frames < 2)
		return(0);

	// unpack the transition parameters
	len = (data1 & 0x3f) * 2;
	rms = (data1 >> 6) & 0x3f;
	flags = (data1 >> 12);

	f2 = (data2 & 0x3f) * 50;
	f2_min = (((data2 >> 6) & 0x1f) - 15) * 50;
	f2_max = (((data2 >> 11) & 0x1f) - 15) * 50;
	f3_adj = (((data2 >> 16) & 0x1f) - 15) * 50;
	f3_amp = ((data2 >> 21) & 0x1f) * 8;
	f1 = ((data2 >> 26) & 0x7);
	vcolour = (data2 >> 29);

//	fprintf(stderr,"FMT%d %3s %3d-%3d f1=%d f2=%4d %4d %4d f3=%4d %3d\n",
//		which,WordToString(other_ph->mnemonic),len,rms,f1,f2,f2_min,f2_max,f3_adj,f3_amp);

	// '?' phoneme: set flag 8 (the glottal-stop handling below)
	if(other_ph->mnemonic == '?')
		flags |= 8;

	if(which == 1)
	{
		/* entry to vowel */
		fr = CopyFrame(seq[0].frame,0);
		seq[0].frame = fr;
		seq[0].length = VOWEL_FRONT_LENGTH;
		if(len > 0)
			seq[0].length = len;
		seq[0].frflags |= FRFLAG_LEN_MOD;     // reduce length modification
		fr->frflags |= FRFLAG_LEN_MOD;

		next_rms = seq[1].frame->rms;

		if(f2 != 0)
		{
			if(rms & 0x20)
			{
				// bit 5 of rms: amplitude is relative to the next frame's rms
				set_frame_rms(fr,(next_rms * (rms & 0x1f))/30);
			}
			AdjustFormants(fr, f2, f2_min, f2_max, f1, f3_adj, f3_amp, flags);

			if((rms & 0x20) == 0)
			{
				set_frame_rms(fr,rms*2);
			}
		}
		else
		{
			if(flags & 8)
				set_frame_rms(fr,(next_rms*24)/32);
			else
				set_frame_rms(fr,RMS_START);
		}

		if(flags & 8)
		{
//			set_frame_rms(fr,next_rms - 5);
			modn_flags = 0x800 + (VowelCloseness(fr) << 8);
		}
	}
	else
	{
		// exit from vowel
		rms = rms*2;
		if((f2 != 0) || (flags != 0))
		{

			if(flags & 8)
			{
				// glottal-stop effect instead of a formant transition
				fr = CopyFrame(seq[n_frames-1].frame,0);
				seq[n_frames-1].frame = fr;
				rms = RMS_GLOTTAL1;

				// degree of glottal-stop effect depends on closeness of vowel (indicated by f1 freq)
				modn_flags = 0x400 + (VowelCloseness(fr) << 8);
			}
			else
			{
				// append a copy of the last frame to carry the transition
				fr = DuplicateLastFrame(seq,n_frames++,len);
				if(len > 36)
					seq_len_adjust += (len - 36);

				if(f2 != 0)
				{
					AdjustFormants(fr, f2, f2_min, f2_max, f1, f3_adj, f3_amp, flags);
				}
			}

			set_frame_rms(fr,rms);

			if((vcolour > 0) && (vcolour <= N_VCOLOUR))
			{
				// apply a percentage "colouring" to formants 1-5 of every frame
				for(ix=0; ix<n_frames; ix++)
				{
					fr = CopyFrame(seq[ix].frame,0);
					seq[ix].frame = fr;

					for(formant=1; formant<=5; formant++)
					{
						int x;
						x = fr->ffreq[formant] * vcolouring[vcolour-1][formant-1];
						fr->ffreq[formant] = x / 256;
					}
				}
			}
		}
	}

	if(fr != NULL)
	{
		if(flags & 4)
			fr->frflags |= FRFLAG_FORMANT_RATE;
		if(flags & 2)
			fr->frflags |= FRFLAG_BREAK;    // don't merge with next frame
	}

	if(flags & 0x40)
		DoPause(12,0);  // add a short pause after the consonant

	if(flags & 16)
		return(len);
	return(0);
}  // end of FormantTransition2
static void SmoothSpect(void)
{//==========================
// Limit the rate of frequency change of formants, to reduce chirping.
// Works outwards from the recorded vowel centre of the syllable
// (syllable_centre): first backwards to syllable_start, then forwards to
// syllable_end, walking the circular command queue. Where a formant would
// change faster than formant_rate allows for the segment length, the frame
// is copied (CopyFrame) and the copy's frequency clamped, so pool frames
// are modified rather than the shared originals.
	long *q;
	frame_t *frame;
	frame_t *frame2;
	frame_t *frame1;
	frame_t *frame_centre;
	int ix;
	int len;
	int pk;
	int modified;
	int allowed;
	int diff;

	if(syllable_start == syllable_end)
		return;   // nothing queued for this syllable

	if((syllable_centre < 0) || (syllable_centre == syllable_start))
	{
		syllable_start = syllable_end;
		return;   // no vowel centre was recorded
	}

	q = wcmdq[syllable_centre];
	frame_centre = (frame_t *)q[2];

	// backwards
	ix = syllable_centre -1;
	frame = frame2 = frame_centre;
	for(;;)
	{
		if(ix < 0) ix = N_WCMDQ-1;   // the command queue is circular
		q = wcmdq[ix];

		if(q[0] == WCMD_PAUSE || q[0] == WCMD_WAVE)
			break;

		if(q[0] == WCMD_SPECT || q[0] == WCMD_SPECT2)
		{
			len = q[1] & 0xffff;

			frame1 = (frame_t *)q[3];
			if(frame1 == frame)
			{
				// this command ends at the frame just processed:
				// point it at the (possibly adjusted) copy
				q[3] = (long)frame2;
				frame1 = frame2;
			}
			else
				break;  // doesn't follow on from previous frame

			frame = frame2 = (frame_t *)q[2];
			modified = 0;

			if(frame->frflags & FRFLAG_BREAK)
				break;

			if(frame->frflags & FRFLAG_FORMANT_RATE)
				len = (len * 12)/10;   // allow slightly greater rate of change for this frame (was 12/10)

			for(pk=0; pk<6; pk++)
			{
				int f1, f2;

				if((frame->frflags & FRFLAG_BREAK_LF) && (pk < 3))
					continue;   // don't limit the lower formants across this break

				f1 = frame1->ffreq[pk];
				f2 = frame->ffreq[pk];

				// backwards
				if((diff = f2 - f1) > 0)
				{
					allowed = f1*2 + f2;
				}
				else
				{
					allowed = f1 + f2*2;
				}

				// the allowed change is specified as percentage (%*10) of the frequency
				// take "frequency" as 1/3 from the lower freq
				allowed = (allowed * formant_rate[pk])/3000;
				allowed = (allowed * len)/256;

				if(diff > allowed)
				{
					if(modified == 0)
					{
						frame2 = CopyFrame(frame,0);
						modified = 1;
					}
					frame2->ffreq[pk] = frame1->ffreq[pk] + allowed;
					q[2] = (long)frame2;
				}
				else
				if(diff < -allowed)
				{
					if(modified == 0)
					{
						frame2 = CopyFrame(frame,0);
						modified = 1;
					}
					frame2->ffreq[pk] = frame1->ffreq[pk] - allowed;
					q[2] = (long)frame2;
				}
			}
		}

		if(ix == syllable_start)
			break;
		ix--;
	}

	// forwards
	ix = syllable_centre;

	frame = NULL;
	for(;;)
	{
		q = wcmdq[ix];

		if(q[0] == WCMD_PAUSE || q[0] == WCMD_WAVE)
			break;

		if(q[0] == WCMD_SPECT || q[0] == WCMD_SPECT2)
		{
			len = q[1] & 0xffff;

			frame1 = (frame_t *)q[2];
			if(frame != NULL)
			{
				if(frame1 == frame)
				{
					// this command starts at the frame just processed:
					// point it at the (possibly adjusted) copy
					q[2] = (long)frame2;
					frame1 = frame2;
				}
				else
					break;   // doesn't follow on from previous frame
			}

			frame = frame2 = (frame_t *)q[3];
			modified = 0;

			if(frame1->frflags & FRFLAG_BREAK)
				break;

			if(frame1->frflags & FRFLAG_FORMANT_RATE)
				len = (len *6)/5;   // allow slightly greater rate of change for this frame

			for(pk=0; pk<6; pk++)
			{
				int f1, f2;

				f1 = frame1->ffreq[pk];
				f2 = frame->ffreq[pk];

				// forwards
				if((diff = f2 - f1) > 0)
				{
					allowed = f1*2 + f2;
				}
				else
				{
					allowed = f1 + f2*2;
				}
				allowed = (allowed * formant_rate[pk])/3000;
				allowed = (allowed * len)/256;

				if(diff > allowed)
				{
					if(modified == 0)
					{
						frame2 = CopyFrame(frame,0);
						modified = 1;
					}
					frame2->ffreq[pk] = frame1->ffreq[pk] + allowed;
					q[3] = (long)frame2;
				}
				else
				if(diff < -allowed)
				{
					if(modified == 0)
					{
						frame2 = CopyFrame(frame,0);
						modified = 1;
					}
					frame2->ffreq[pk] = frame1->ffreq[pk] - allowed;
					q[3] = (long)frame2;
				}
			}
		}

		ix++;
		if(ix >= N_WCMDQ) ix = 0;
		if(ix == syllable_end)
			break;
	}

	syllable_start = syllable_end;
}
  669. static void StartSyllable(void)
  670. {//============================
  671. // start of syllable, if not already started
  672. if(syllable_end == syllable_start)
  673. syllable_end = wcmdq_tail;
  674. }
int DoSpect(PHONEME_TAB *this_ph, PHONEME_TAB *prev_ph, PHONEME_TAB *next_ph,
		int which, PHONEME_LIST *plist, int modulation)
{//===================================================================================
// Queue formant-synthesis (spectrum) commands for a phoneme.
// which: 1 = start of phoneme, 2 = body and end
// length_mod (from plist->length): 256 = 100%
// modulation: -1 = don't write to wcmdq (lengths are still computed)
// Returns the total queued length, in samples.
	int n_frames;
	frameref_t *frames;
	int frameix;
	frame_t *frame1;
	frame_t *frame2;
	frame_t *fr;
	int ix;
	long *q;
	int len;
	int match_level;
	int frame_length;
	int frame1_length;
	int frame2_length;
	int length_factor;
	int length_mod;
	int total_len = 0;
	static int wave_flag = 0;   // set while a wavefile is mixed with the spectra
	int wcmd_spect = WCMD_SPECT;

	length_mod = plist->length;
	if(length_mod==0) length_mod=256;

	if(which==1)
	{
		// limit the shortening of sonorants before shortened (eg. unstressed vowels)
		if((this_ph->type==phLIQUID) || (prev_ph->type==phLIQUID) || (prev_ph->type==phNASAL))
		{
			if(length_mod < (len = translator->langopts.param[LOPT_SONORANT_MIN]))
			{
				length_mod = len;
			}
		}
	}

	modn_flags = 0;
	frames = LookupSpect(this_ph,prev_ph,next_ph,which,&match_level,&n_frames, plist);
	if(frames == NULL)
		return(0);   // not found

	if(wavefile_ix == 0)
	{
		if(wave_flag)
		{
			// cancel any wavefile that was playing previously
			wcmd_spect = WCMD_SPECT2;
			wave_flag = 0;
		}
		else
		{
			wcmd_spect = WCMD_SPECT;
		}
	}

	frame1 = frames[0].frame;
	frame1_length = frames[0].length;
	if(last_frame != NULL)
	{
		if(((last_frame->length < 2) || (last_frame->frflags & FRFLAG_VOWEL_CENTRE))
			&& !(last_frame->frflags & FRFLAG_BREAK))
		{
			// last frame of previous sequence was zero-length, replace with first of this sequence
			wcmdq[last_wcmdq][3] = (long)frame1;

			if(last_frame->frflags & FRFLAG_BREAK_LF)
			{
				// but flag indicates keep HF peaks in last segment:
				// splice the previous frame's peaks 3+ into a copy of frame1
				fr = CopyFrame(frame1,1);
				for(ix=3; ix<N_PEAKS; ix++)
				{
					fr->ffreq[ix] = last_frame->ffreq[ix];
					fr->fheight[ix] = last_frame->fheight[ix];
				}
				wcmdq[last_wcmdq][3] = (long)fr;
			}
		}
	}

	if((this_ph->type == phVOWEL) && (which == 2))
	{
		SmoothSpect();    // process previous syllable

		// remember the point in the output queue of the centre of the vowel
		syllable_centre = wcmdq_tail;
	}

	// queue one spectrum command per adjacent frame pair
	frame_length = frame1_length;
	for(frameix=1; frameix<n_frames; frameix++)
	{
		frame2 = frames[frameix].frame;
		frame2_length = frames[frameix].length;

		if((wavefile_ix != 0) && ((frame1->frflags & FRFLAG_DEFER_WAV)==0))
		{
			// there is a wave file to play along with this synthesis
			seq_len_adjust = 0;
			DoSample2(wavefile_ix,which+0x100,0,wavefile_amp);
			wave_flag = 1;
			wavefile_ix = 0;
		}

		length_factor = length_mod;
		if(frame1->frflags & FRFLAG_LEN_MOD)     // reduce effect of length mod
		{
			length_factor = (length_mod*4 + 256*3)/7;
		}
		len = (frame_length * samplerate)/1000;   // mS to samples
		len = (len * length_factor)/256;

		if(modulation >= 0)
		{
			if(frame1->frflags & FRFLAG_MODULATE)
			{
				modulation = 6;
			}
			if((frameix == n_frames-1) && (modn_flags & 0xf00))
				modulation |= modn_flags;   // before or after a glottal stop
		}

		pitch_length += len;
		amp_length += len;

		if(frame_length < 2)
		{
			// zero-length frame: don't queue a command, just move on
			last_frame = NULL;
			frame_length = frame2_length;
			frame1 = frame2;
		}
		else
		{
			last_wcmdq = wcmdq_tail;

			if(modulation >= 0)
			{
				q = wcmdq[wcmdq_tail];
				q[0] = wcmd_spect;
				q[1] = len + (modulation << 16);
				q[2] = long(frame1);
				q[3] = long(frame2);

				WcmdqInc();
			}
			last_frame = frame1 = frame2;
			frame_length = frame2_length;
			total_len += len;
		}
	}
	return(total_len);
}  // end of Synthesize::DoSpect
  813. static void DoMarker(int type, int char_posn, int length, int value)
  814. {//=================================================================
  815. // This could be used to return an index to the word currently being spoken
  816. // Type 1=word, 2=sentence, 3=named marker, 4=play audio, 5=end
  817. wcmdq[wcmdq_tail][0] = WCMD_MARKER;
  818. wcmdq[wcmdq_tail][1] = type;
  819. wcmdq[wcmdq_tail][2] = (char_posn & 0xffffff) | (length << 24);
  820. wcmdq[wcmdq_tail][3] = value;
  821. WcmdqInc();
  822. } // end of Synthesize::DoMarker
  823. void DoVoiceChange(voice_t *v)
  824. {//===========================
  825. // allocate memory for a copy of the voice data, and free it in wavegenfill()
  826. voice_t *v2;
  827. v2 = (voice_t *)malloc(sizeof(voice_t));
  828. memcpy(v2,v,sizeof(voice_t));
  829. wcmdq[wcmdq_tail][0] = WCMD_VOICE;
  830. wcmdq[wcmdq_tail][1] = (long)(v2);
  831. WcmdqInc();
  832. }
static void DoEmbedded(int &embix, int sourceix)
{//=============================================
// There were embedded commands in the text at this point.
// Process entries from embedded_list starting at embix (advanced in
// place) until one with the "last command for this word" bit is seen.
	unsigned int word;  // bit 7=last command for this word, bits 5,6 sign, bits 0-4 command
	unsigned int value;
	int command;

	do {
		word = embedded_list[embix++];
		value = word >> 8;
		command = word & 0x7f;

		switch(command & 0x1f)
		{
		case EMBED_S:   // speed
			SetEmbedded((command & 0x60) + EMBED_S2,value);   // adjusts embedded_value[EMBED_S2]
			SetSpeed(2);
			break;

		case EMBED_I:   // play dynamically loaded wav data (sound icon)
			if((int)value < n_soundicon_tab)
			{
				if(soundicon_tab[value].length != 0)
				{
					DoPause(10,0);   // ensure a break in the speech
					wcmdq[wcmdq_tail][0] = WCMD_WAVE;
					wcmdq[wcmdq_tail][1] = soundicon_tab[value].length;
					wcmdq[wcmdq_tail][2] = (long)soundicon_tab[value].data + 44;   // skip WAV header
					wcmdq[wcmdq_tail][3] = 0x1500;   // 16 bit data, amp=21
					WcmdqInc();
				}
			}
			break;

		case EMBED_M:   // named marker
			DoMarker(espeakEVENT_MARK, (sourceix & 0x7ff) + clause_start_char, 0, value);
			break;

		case EMBED_U:   // play sound
			DoMarker(espeakEVENT_PLAY, count_characters+1, 0, value);   // always occurs at end of clause
			break;

		default:
			// other commands are passed through the queue to wavegen
			DoPause(10,0);   // ensure a break in the speech
			wcmdq[wcmdq_tail][0] = WCMD_EMBEDDED;
			wcmdq[wcmdq_tail][1] = command;
			wcmdq[wcmdq_tail][2] = value;
			WcmdqInc();
			break;
		}
	} while ((word & 0x80) == 0);   // loop until the word's last command
}
// Placeholder: dictionary switching is not implemented in this build.
void SwitchDictionary()
{//====================
}
  882. int Generate(PHONEME_LIST *phoneme_list, int *n_ph, int resume)
  883. {//============================================================
  884. static int ix;
  885. static int embedded_ix;
  886. static int word_count;
  887. PHONEME_LIST *prev;
  888. PHONEME_LIST *next;
  889. PHONEME_LIST *next2;
  890. PHONEME_LIST *p;
  891. int released;
  892. int stress;
  893. int modulation;
  894. int pre_voiced;
  895. int free_min;
  896. unsigned char *pitch_env=NULL;
  897. unsigned char *amp_env;
  898. PHONEME_TAB *ph;
  899. PHONEME_TAB *prev_ph;
  900. static int sourceix=0;
  901. #ifdef TEST_MBROLA
  902. if(mbrola_name[0] != 0)
  903. return(MbrolaGenerate(phoneme_list,n_ph,resume));
  904. #endif
  905. if(option_quiet)
  906. return(0);
  907. if(resume == 0)
  908. {
  909. ix = 1;
  910. embedded_ix=0;
  911. word_count = 0;
  912. pitch_length = 0;
  913. amp_length = 0;
  914. last_frame = NULL;
  915. last_wcmdq = -1;
  916. syllable_start = wcmdq_tail;
  917. syllable_end = wcmdq_tail;
  918. syllable_centre = -1;
  919. last_pitch_cmd = -1;
  920. memset(vowel_transition,0,sizeof(vowel_transition));
  921. }
  922. while(ix < (*n_ph))
  923. {
  924. p = &phoneme_list[ix];
  925. if(p->type == phPAUSE)
  926. free_min = 5;
  927. else
  928. if(p->type != phVOWEL)
  929. free_min = 10; // we need less Q space for non-vowels, and we need to generate phonemes after a vowel so that the pitch_length is filled in
  930. else
  931. free_min = MIN_WCMDQ; // 22
  932. if(WcmdqFree() <= free_min)
  933. return(1); // wait
  934. prev = &phoneme_list[ix-1];
  935. next = &phoneme_list[ix+1];
  936. next2 = &phoneme_list[ix+2];
  937. if(p->synthflags & SFLAG_EMBEDDED)
  938. {
  939. DoEmbedded(embedded_ix, p->sourceix);
  940. }
  941. if(p->newword)
  942. {
  943. if(translator->langopts.param[LOPT_WORD_MERGE] == 0)
  944. last_frame = NULL;
  945. sourceix = (p->sourceix & 0x7ff) + clause_start_char;
  946. if(p->newword & 4)
  947. DoMarker(espeakEVENT_SENTENCE, sourceix, 0, count_sentences); // start of sentence
  948. // if(p->newword & 2)
  949. // DoMarker(espeakEVENT_END, count_characters, 0, count_sentences); // end of clause
  950. if(p->newword & 1)
  951. DoMarker(espeakEVENT_WORD, sourceix, p->sourceix >> 11, clause_start_word + word_count++);
  952. }
  953. EndAmplitude();
  954. if(p->prepause > 0)
  955. DoPause(p->prepause,1);
  956. if(option_phoneme_events && (p->type != phVOWEL))
  957. {
  958. // Note, for vowels, do the phoneme event after the vowel-start
  959. DoMarker(espeakEVENT_PHONEME, sourceix, 0, p->ph->mnemonic);
  960. }
  961. switch(p->type)
  962. {
  963. case phPAUSE:
  964. DoPause(p->length,0);
  965. break;
  966. case phSTOP:
  967. released = 0;
  968. if(next->type==phVOWEL) released = 1;
  969. if(next->type==phLIQUID && !next->newword) released = 1;
  970. if(released)
  971. DoSample(p->ph,next->ph,2,0,0);
  972. else
  973. DoSample(p->ph,phoneme_tab[phonPAUSE],2,0,0);
  974. break;
  975. case phFRICATIVE:
  976. if(p->synthflags & SFLAG_LENGTHEN)
  977. DoSample(p->ph,next->ph,2,p->length,0); // play it twice for [s:] etc.
  978. DoSample(p->ph,next->ph,2,p->length,0);
  979. break;
  980. case phVSTOP:
  981. pre_voiced = 0;
  982. if(next->type==phVOWEL)
  983. {
  984. DoAmplitude(p->amp,NULL);
  985. DoPitch(envelope_data[p->env],p->pitch1,p->pitch2);
  986. pre_voiced = 1;
  987. }
  988. else
  989. if((next->type==phLIQUID) && !next->newword)
  990. {
  991. DoAmplitude(next->amp,NULL);
  992. DoPitch(envelope_data[next->env],next->pitch1,next->pitch2);
  993. pre_voiced = 1;
  994. }
  995. else
  996. {
  997. if(last_pitch_cmd < 0)
  998. {
  999. DoAmplitude(next->amp,NULL);
  1000. DoPitch(envelope_data[p->env],p->pitch1,p->pitch2);
  1001. }
  1002. }
  1003. if((prev->type==phVOWEL) || (prev->ph->phflags & phVOWEL2))
  1004. {
  1005. // a period of voicing before the release
  1006. DoSpect(p->ph,phoneme_tab[phonSCHWA],next->ph,1,p,0);
  1007. if(p->synthflags & SFLAG_LENGTHEN)
  1008. {
  1009. DoPause(20,0);
  1010. DoSpect(p->ph,phoneme_tab[phonSCHWA],next->ph,1,p,0);
  1011. }
  1012. }
  1013. else
  1014. {
  1015. if(p->synthflags & SFLAG_LENGTHEN)
  1016. {
  1017. DoPause(50,0);
  1018. }
  1019. }
  1020. if(pre_voiced)
  1021. {
  1022. // followed by a vowel, or liquid + vowel
  1023. StartSyllable();
  1024. DoSpect(p->ph,prev->ph,next->ph,2,p,0);
  1025. }
  1026. else
  1027. {
  1028. // if((prev->type != phVOWEL) && ((prev->ph->phflags & phVOICED)==0) && ((next->ph->phflags & phVOICED)==0))
  1029. // DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE_SHORT],2,p,0);
  1030. // else
  1031. DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,0);
  1032. // DoSpect(p->ph,prev->ph,next->ph,2,p,0);
  1033. }
  1034. break;
  1035. case phVFRICATIVE:
  1036. if(next->type==phVOWEL)
  1037. {
  1038. DoAmplitude(p->amp,NULL);
  1039. DoPitch(envelope_data[p->env],p->pitch1,p->pitch2);
  1040. }
  1041. else
  1042. if(next->type==phLIQUID)
  1043. {
  1044. DoAmplitude(next->amp,NULL);
  1045. DoPitch(envelope_data[next->env],next->pitch1,next->pitch2);
  1046. }
  1047. else
  1048. {
  1049. if(last_pitch_cmd < 0)
  1050. {
  1051. DoAmplitude(p->amp,NULL);
  1052. DoPitch(envelope_data[p->env],p->pitch1,p->pitch2);
  1053. }
  1054. }
  1055. if((next->type==phVOWEL) || ((next->type==phLIQUID)) && (next->newword==0)) // ?? test 14.Aug.2007
  1056. {
  1057. StartSyllable();
  1058. if(p->synthflags & SFLAG_LENGTHEN)
  1059. DoSpect(p->ph,prev->ph,next->ph,2,p,0);
  1060. DoSpect(p->ph,prev->ph,next->ph,2,p,0);
  1061. }
  1062. else
  1063. {
  1064. if(p->synthflags & SFLAG_LENGTHEN)
  1065. DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,0);
  1066. DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,0);
  1067. }
  1068. break;
  1069. case phNASAL:
  1070. if(!(p->synthflags & SFLAG_SEQCONTINUE))
  1071. {
  1072. DoAmplitude(p->amp,NULL);
  1073. DoPitch(envelope_data[p->env],p->pitch1,p->pitch2);
  1074. }
  1075. if(prev->type==phNASAL)
  1076. {
  1077. last_frame = NULL;
  1078. }
  1079. if(next->type==phVOWEL)
  1080. {
  1081. StartSyllable();
  1082. DoSpect(p->ph,prev->ph,next->ph,1,p,0);
  1083. }
  1084. else
  1085. if(prev->type==phVOWEL && (p->synthflags & SFLAG_SEQCONTINUE))
  1086. {
  1087. DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,0);
  1088. }
  1089. else
  1090. {
  1091. last_frame = NULL; // only for nasal ?
  1092. if(next->type == phLIQUID)
  1093. DoSpect(p->ph,prev->ph,phoneme_tab[phonSONORANT],2,p,0);
  1094. else
  1095. DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,0);
  1096. last_frame = NULL;
  1097. }
  1098. break;
  1099. case phLIQUID:
  1100. modulation = 0;
  1101. if(p->ph->phflags & phTRILL)
  1102. modulation = 5;
  1103. prev_ph = prev->ph;
  1104. // if(p->newword)
  1105. // prev_ph = phoneme_tab[phonPAUSE]; // pronounce fully at the start of a word
  1106. if(!(p->synthflags & SFLAG_SEQCONTINUE))
  1107. {
  1108. DoAmplitude(p->amp,NULL);
  1109. DoPitch(envelope_data[p->env],p->pitch1,p->pitch2);
  1110. }
  1111. if(prev->type==phNASAL)
  1112. {
  1113. last_frame = NULL;
  1114. }
  1115. if(next->type==phVOWEL)
  1116. {
  1117. StartSyllable();
  1118. DoSpect(p->ph,prev_ph,next->ph,1,p,modulation); // (,)r
  1119. }
  1120. else
  1121. if(prev->type==phVOWEL && (p->synthflags & SFLAG_SEQCONTINUE))
  1122. {
  1123. DoSpect(p->ph,prev_ph,next->ph,1,p,modulation);
  1124. }
  1125. else
  1126. {
  1127. DoSpect(p->ph,prev_ph,next->ph,1,p,modulation);
  1128. }
  1129. break;
  1130. case phVOWEL:
  1131. ph = p->ph;
  1132. stress = p->tone & 0xf;
  1133. // vowel transition from the preceding phoneme
  1134. vowel_transition0 = vowel_transition[0];
  1135. vowel_transition1 = vowel_transition[1];
  1136. pitch_env = envelope_data[p->env];
  1137. amp_env = NULL;
  1138. if(p->tone_ph != 0)
  1139. {
  1140. pitch_env = LookupEnvelope(phoneme_tab[p->tone_ph]->spect);
  1141. amp_env = LookupEnvelope(phoneme_tab[p->tone_ph]->after);
  1142. }
  1143. StartSyllable();
  1144. modulation = 2;
  1145. if(stress <= 1)
  1146. modulation = 1; // 16ths
  1147. else
  1148. if(stress >= 7)
  1149. modulation = 3;
  1150. if(prev->type == phVSTOP || prev->type == phVFRICATIVE)
  1151. {
  1152. DoAmplitude(p->amp,amp_env);
  1153. DoPitch(pitch_env,p->pitch1,p->pitch2); // don't use prevocalic rising tone
  1154. DoSpect(ph,prev->ph,next->ph,1,p,modulation);
  1155. }
  1156. else
  1157. if(prev->type==phLIQUID || prev->type==phNASAL)
  1158. {
  1159. DoAmplitude(p->amp,amp_env);
  1160. DoSpect(ph,prev->ph,next->ph,1,p,modulation); // continue with pre-vocalic rising tone
  1161. DoPitch(pitch_env,p->pitch1,p->pitch2);
  1162. }
  1163. else
  1164. {
  1165. if(!(p->synthflags & SFLAG_SEQCONTINUE))
  1166. {
  1167. DoAmplitude(p->amp,amp_env);
  1168. DoPitch(pitch_env,p->pitch1,p->pitch2);
  1169. }
  1170. DoSpect(ph,prev->ph,next->ph,1,p,modulation);
  1171. }
  1172. if(option_phoneme_events)
  1173. {
  1174. DoMarker(espeakEVENT_PHONEME, sourceix, 0, p->ph->mnemonic);
  1175. }
  1176. DoSpect(p->ph,prev->ph,next->ph,2,p,modulation);
  1177. memset(vowel_transition,0,sizeof(vowel_transition));
  1178. break;
  1179. }
  1180. ix++;
  1181. }
  1182. EndPitch(1);
  1183. if(*n_ph > 0)
  1184. {
  1185. DoMarker(espeakEVENT_END, count_characters, 0, count_sentences); // end of clause
  1186. *n_ph = 0;
  1187. }
  1188. return(0); // finished the phoneme list
  1189. } // end of Generate
static int timer_on = 0;   // non-zero while the synthesis timer is running (set in SpeakNextClause when text is supplied, cleared on stop/EOF/pause)
static int paused = 0;     // non-zero while speech is paused (SpeakNextClause control 3 sets it to 2, resume clears it)
  1192. int SynthOnTimer()
  1193. {//===============
  1194. if(!timer_on)
  1195. {
  1196. return(WavegenCloseSound());
  1197. }
  1198. do {
  1199. if(Generate(phoneme_list,&n_phoneme_list,1)==0)
  1200. {
  1201. SpeakNextClause(NULL,NULL,1);
  1202. }
  1203. } while(skipping_text);
  1204. return(0);
  1205. }
  1206. int SynthStatus()
  1207. {//==============
  1208. return(timer_on | paused);
  1209. }
int SpeakNextClause(FILE *f_in, const void *text_in, int control)
{//==============================================================
// Speak text from file (f_in) or memory (text_in)
// control 0: start
//    either f_in or text_in is set, the other must be NULL
// The other calls have f_in and text_in = NULL
//    control 1: speak next text
//            2: stop
//            3: pause (toggle)
//            4: is file being read (0=no, 1=yes)
//            5: interrupt and flush current text.
//
// Returns 0 when there is nothing (more) to speak, 1 when a clause was
// processed (or deliberately skipped) and more may follow.

	int clause_tone;       // intonation type of the clause, filled in by TranslateClause
	char *voice_change;    // name of a new voice if the clause ended at a voice-change marker, else NULL

	// The current input source persists across calls so that successive
	// control==1 calls continue reading the same file/buffer.
	static FILE *f_text=NULL;
	static const void *p_text=NULL;

	if(control == 4)
	{
		// query: is a file or memory buffer still being read?
		if((f_text == NULL) && (p_text == NULL))
			return(0);
		else
			return(1);
	}

	if(control == 2)
	{
		// stop speaking
		// NOTE(review): 'paused' is not cleared here, so SynthStatus() can still
		// report paused after a stop — confirm whether that is intended.
		timer_on = 0;
		p_text = NULL;
		if(f_text != NULL)
		{
			fclose(f_text);
			f_text=NULL;
		}
		n_phoneme_list = 0;
		WcmdqStop();
		return(0);
	}

	if(control == 3)
	{
		// toggle pause
		if(paused == 0)
		{
			// pause: stop the timer. paused is set to 2 but only ever tested
			// against 0 in this file — the significance of 2 vs 1 is not
			// visible from here.
			timer_on = 0;
			paused = 2;
		}
		else
		{
			// resume: reopen the sound device and restart the current clause
			WavegenOpenSound();
			timer_on = 1;
			paused = 0;
			Generate(phoneme_list,&n_phoneme_list,0);   // re-start from beginning of clause
		}
		return(0);
	}

	if(control == 5)
	{
		// stop speaking, but continue looking for text
		n_phoneme_list = 0;
		WcmdqStop();
		return(0);
	}

	if((f_in != NULL) || (text_in != NULL))
	{
		// new input supplied: remember the source and (re)start speaking
		f_text = f_in;
		p_text = text_in;
		timer_on = 1;
		paused = 0;
	}

	if((f_text==NULL) && (p_text==NULL))
	{
		// no text to speak
		skipping_text = 0;
		timer_on = 0;
		return(0);
	}

	if((f_text != NULL) && feof(f_text))
	{
		// reached the end of the input file
		timer_on = 0;
		fclose(f_text);
		f_text=NULL;
		return(0);
	}

	if(current_phoneme_table != voice->phoneme_tab_ix)
	{
		// the current voice uses a different phoneme table; switch to it
		SelectPhonemeTable(voice->phoneme_tab_ix);
	}

	// read the next clause from the input text file, translate it, and generate
	// entries in the wavegen command queue
	p_text = translator->TranslateClause(f_text,p_text,&clause_tone,&voice_change);

	translator->CalcPitches(clause_tone);
	translator->CalcLengths();

	translator->GetTranslatedPhonemeString(translator->phon_out,sizeof(translator->phon_out));
	if(option_phonemes > 0)
	{
		// trace the phoneme mnemonics for this clause
		fprintf(f_trans,"%s\n",translator->phon_out);

		if(!iswalpha(0x010d))
		{
			// check that c-caron is recognized as an alphabetic character
			fprintf(stderr,"Warning: Accented letters are not recognized, eg: U+010D\nSet LC_CTYPE to a UTF-8 locale\n");
		}
	}
	if(phoneme_callback != NULL)
	{
		// notify the client of the phoneme string for this clause
		phoneme_callback(translator->phon_out);
	}

	if(skipping_text)
	{
		// discard this clause without synthesizing it, but report that
		// more text may follow
		n_phoneme_list = 0;
		return(1);
	}

	if(mbrola_name[0] != 0)
	{
		// an mbrola voice is selected: hand the phoneme list to mbrola
		// instead of (presumably) the built-in synthesis path — the exact
		// interaction with Generate() below is not visible from this file
#ifdef USE_MBROLA_LIB
		MbrolaTranslate(phoneme_list,n_phoneme_list,NULL);
#else
		MbrolaTranslate(phoneme_list,n_phoneme_list,stdout);
#endif
	}

	Generate(phoneme_list,&n_phoneme_list,0);
	WavegenOpenSound();

	if(voice_change != NULL)
	{
		// voice change at the end of the clause (i.e. clause was terminated by a voice change)
		new_voice = LoadVoiceVariant(voice_change,0);  // add a Voice instruction to wavegen at the end of the clause
	}

	if(new_voice)
	{
		// finished the current clause, now change the voice if there was an embedded
		// change voice command at the end of it (i.e. clause was broken at the change voice command)
		DoVoiceChange(voice);
		new_voice = NULL;
	}
	return(1);
}  // end of SpeakNextClause