
/native/external/espeak/platforms/windows/windows_sapi/ttsengobj.cpp

http://eyes-free.googlecode.com/
/***************************************************************************
 *   Copyright (C) 2005 to 2007 by Jonathan Duddington                     *
 *   email: jonsd@users.sourceforge.net                                    *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 3 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, see:                                 *
 *               <http://www.gnu.org/licenses/>.                           *
 ***************************************************************************/

#include "stdafx.h"
#include "TtsEngObj.h"

#include "src/speak_lib.h"
#include "stdio.h"

#define CTRL_EMBEDDED  1

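// File-scope state shared with the plain-C eSpeak callback: SynthCallback() is not
// a class member, so the current engine object and the SAPI output site are kept
// in globals.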
CTTSEngObj *m_EngObj;
ISpTTSEngineSite* m_OutputSite;
FILE *f_log2=NULL;

extern int AddNameData(const char *name, int wide);
extern void InitNamedata(void);

int master_volume = 100;
int master_rate = 0;

int gVolume = 100;
int gSpeed = -1;
int gPitch = -1;
int gRange = -1;
int gEmphasis = 0;
int gSayas = 0;


char *path_install = NULL;

unsigned long audio_offset = 0;
unsigned long audio_latest = 0;
unsigned int gBufSize = 0;
wchar_t *TextBuf=NULL;

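// One FRAG_OFFSET is recorded per spoken text fragment.  It maps a position in the
// synthesis buffer (TextBuf) back to the caller's original text:
//   bufix  - offset of the fragment's text within the buffer passed to espeak_Synth()
//   textix - the fragment's ulTextSrcOffset in the original Speak() input
//   cmdlen - length of the embedded eSpeak commands written just before the text
// SynthCallback() uses these to convert word-event positions into SAPI text offsets.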
typedef struct {
	unsigned int bufix;
	unsigned int textix;
	unsigned int cmdlen;
} FRAG_OFFSET;

int srate;   // samplerate, Hz/50
int n_frag_offsets = 0;
int frag_ix = 0;
int frag_count=0;
FRAG_OFFSET *frag_offsets = NULL;


int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);

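// Callback registered with espeak_SetSynthCallback().  eSpeak calls it with each
// block of synthesized 16-bit samples plus a list of events; word and bookmark
// events are translated into SAPI SPEVENTs and the audio is written to the
// output site.  A non-zero return value tells eSpeak to abort the synthesis.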
int SynthCallback(short *wav, int numsamples, espeak_EVENT *events)
{//================================================================
	int hr;
	wchar_t *tailptr;
	int text_offset;    // signed, so the clamp to zero below can take effect
	int length;

	espeak_EVENT *event;
#define N_EVENTS 100
	int n_Events = 0;
	SPEVENT *Event;
	SPEVENT Events[N_EVENTS];

	if(m_OutputSite->GetActions() & SPVES_ABORT)
		return(1);

	m_EngObj->CheckActions(m_OutputSite);

	// return the events
	for(event=events; event->type != 0; event++)
	{
		audio_latest = event->audio_position + audio_offset;

		if((event->type == espeakEVENT_WORD) && (event->length > 0))
		{
			while(((frag_ix+1) < frag_count) &&
				((event->text_position -1 + frag_offsets[frag_ix+1].cmdlen) >= frag_offsets[frag_ix+1].bufix))
			{
				frag_ix++;
			}
			text_offset = frag_offsets[frag_ix].textix +
				event->text_position -1 - frag_offsets[frag_ix].bufix + frag_offsets[frag_ix].cmdlen;
			length = event->length - frag_offsets[frag_ix].cmdlen;
			frag_offsets[frag_ix].cmdlen = 0;

			if(text_offset < 0)
				text_offset = 0;

			Event = &Events[n_Events++];
			Event->eEventId             = SPEI_WORD_BOUNDARY;
			Event->elParamType          = SPET_LPARAM_IS_UNDEFINED;
			Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10;  // ms -> bytes
			Event->lParam               = text_offset;
			Event->wParam               = length;
		}
		if(event->type == espeakEVENT_MARK)
		{
			Event = &Events[n_Events++];
			Event->eEventId             = SPEI_TTS_BOOKMARK;
			Event->elParamType          = SPET_LPARAM_IS_STRING;
			Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10;  // ms -> bytes
			Event->lParam               = (long)event->id.name;
			Event->wParam               = wcstol((wchar_t *)event->id.name,&tailptr,10);
		}
#ifdef deleted
		if(event->type == espeakEVENT_SENTENCE)
		{
			Event = &Events[n_Events++];
			Event->eEventId             = SPEI_SENTENCE_BOUNDARY;
			Event->elParamType          = SPET_LPARAM_IS_UNDEFINED;
			Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10;  // ms -> bytes
			Event->lParam               = 0;
			Event->wParam               = 0;  // TEMP
		}
#endif
	}
	if(n_Events > 0)
		m_OutputSite->AddEvents(Events, n_Events );

	// return the sound data
	hr = m_OutputSite->Write(wav, numsamples*2, NULL);
	return(hr);
}



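// Map a SAPI rate setting (-10 .. +10, after adding the master rate from GetRate())
// onto an eSpeak speed in words per minute; for example a combined rate of 0 selects
// 180 wpm.  ConvertPitch() and ConvertRange() below perform the equivalent table
// lookups for the pitch and pitch-range controls.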
static int ConvertRate(int new_rate)
{//=================================

	int rate;

	static int rate_table[21] = {80,100,115,124,133,142,151,159,168,174,180,
				187,196,208,220,240,270,300,335,369,390 };

	rate = new_rate + master_rate;
	if(rate < -10) rate = -10;
	if(rate > 10) rate = 10;
	return(rate_table[rate+10]);
}  // end of ConvertRate


static int ConvertPitch(int pitch)
{//===============================
	static int pitch_table[41] =
                {0, 0, 0, 0, 0, 0, 0, 0, 4, 8,12,16,20,24,28,32,36,40,44,47,50,
		54,58,62,66,70,74,78,82,84,88,92,96,99,99,99,99,99,99,99,99};
//		{0,3,5,8,10,13,15,18,20,23,25,28,30,33,35,38,40,43,45,48,50,
//		53,55,58,60,63,65,68,70,73,75,78,80,83,85,88,90,93,95,97,99};
	if(pitch < -20) pitch = -20;
	if(pitch > 20) pitch = 20;
	return(pitch_table[pitch+20]);
}

static int ConvertRange(int range)
{//===============================
	static int range_table[21] = {16,28,39,49,58,66,74,81,88,94,100,105,110,115,120,125,130,135,140,145,150};
	if(range < -10) range = -10;
	if(range > 10) range = 10;
	return(range_table[range+10]/2);
}

HRESULT CTTSEngObj::FinalConstruct()
{//=================================
    SPDBG_FUNC( "CTTSEngObj::FinalConstruct" );
    HRESULT hr = S_OK;

#ifdef LOG_DEBUG
f_log2=fopen("C:\\log_espeak","a");
if(f_log2) fprintf(f_log2,"\n****\n");
#endif

    //--- Init vars
    m_hVoiceData = NULL;
    m_pVoiceData = NULL;
    m_pWordList  = NULL;
    m_ulNumWords = 0;

	m_EngObj = this;

    return hr;
} /* CTTSEngObj::FinalConstruct */


void CTTSEngObj::FinalRelease()
{//============================
    SPDBG_FUNC( "CTTSEngObj::FinalRelease" );

    delete m_pWordList;

#ifdef LOG_DEBUG
if(f_log2!=NULL) fclose(f_log2);
#endif

    if( m_pVoiceData )
    {
        ::UnmapViewOfFile( (void*)m_pVoiceData );
    }

    if( m_hVoiceData )
    {
        ::CloseHandle( m_hVoiceData );
    }

} /* CTTSEngObj::FinalRelease */


//
//=== ISpObjectWithToken Implementation ======================================
//

void WcharToChar(char *out, const wchar_t *in, int len)
{//====================================================
	int ix;

	for(ix=0; ix<len; ix++)
	{
		if((out[ix] = (char)in[ix]) == 0)
			break;
	}
	out[len-1] = 0;
}


/*****************************************************************************
* CTTSEngObj::SetObjectToken *
*----------------------------*
*   Description:
*   Read the "VoiceName" attribute from the registry, and use it to select
*   an eSpeak voice file
*****************************************************************************/
STDMETHODIMP CTTSEngObj::SetObjectToken(ISpObjectToken * pToken)
{
	char voice[80];
	strcpy(voice,"default");


	SPDBG_FUNC( "CTTSEngObj::SetObjectToken" );
	HRESULT hr = SpGenericSetObjectToken(pToken, m_cpToken);

	if( SUCCEEDED( hr ) )
	{
		CSpDynamicString voicename;
		CSpDynamicString path;
		HRESULT hr2;
		int len;

		hr2 = m_cpToken->GetStringValue( L"VoiceName", &voicename);
		if( SUCCEEDED(hr2) )
		{
			WcharToChar(voice,voicename,sizeof(voice));
		}


		hr2 = m_cpToken->GetStringValue( L"Path", &path);
		if( SUCCEEDED(hr2) )
		{
			len = wcslen(path)+1;
			path_install = (char *)malloc(len);
			WcharToChar(path_install,path,len);
		}
	}

	gVolume = 100;
	gSpeed = -1;
	gPitch = -1;
	gRange = -1;
	gEmphasis = 0;
	gSayas = 0;

	espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,100,path_install,0);
	espeak_SetVoiceByName(voice);
	espeak_SetSynthCallback(SynthCallback);

	return hr;
} /* CTTSEngObj::SetObjectToken */

//
//=== ISpTTSEngine Implementation ============================================
//

#define L(c1,c2)  (c1<<8)+c2          // combine two characters into an integer

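// Translation table from SAPI phoneme ids (SPPHONEID) to eSpeak phoneme mnemonics,
// indexed by id, for English voices.  NULL entries have no mapping; the "'" and ","
// entries are treated as primary and secondary stress marks by WritePhonemes().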
static char *phoneme_names_en[] = {
	NULL,NULL,NULL," ",NULL,NULL,NULL,NULL,"'",",",
	"A:","a","V","0","aU","@","aI",
	"b","tS","d","D","E","3:","eI",
	"f","g","h","I","i:","dZ","k",
	"l","m","n","N","oU","OI","p",
	"r","s","S","t","T","U","u:",
	"v","w","j","z","Z",
	NULL
 };



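// Convert a SAPI pronunciation (a zero-terminated array of SPPHONEID) into eSpeak
// phoneme text, enclosed in [[ ]] so that espeak_Synth() - called with the
// espeakPHONEMES flag - reads it as phoneme mnemonics rather than ordinary words.
// Returns the number of characters written, or 0 if the current voice's language
// has no translation table.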
int CTTSEngObj::WritePhonemes(SPPHONEID *phons, wchar_t *pW)
{//=========================================================
	int ph;
	int ix=2;
	int skip=0;
	int maxph = 49;
	char *p;
	int j;
	int lang;
	char **phoneme_names;
	char phbuf[200];
	espeak_VOICE *voice;

	voice = espeak_GetCurrentVoice();
	lang = (voice->languages[1] << 8) + (voice->languages[2]);

	phoneme_names = phoneme_names_en;
	maxph = 0;

	if(lang == L('e','n'))
	{
		phoneme_names = phoneme_names_en;
		maxph = 49;
	}

	if(maxph == 0)
		return(0);

	strcpy(phbuf,"[[");
	while(((ph = *phons++) != 0) && (ix < (sizeof(phbuf) - 3)))
	{
		if(skip)
		{
			skip = 0;
			continue;
		}
		if(ph > maxph)
			continue;

		p = NULL;
		if(*phons <= maxph)
			p = phoneme_names[*phons];  // look at the phoneme after this one
		if(p != NULL)
		{
			if(p[0] == '\'')
			{
				phbuf[ix++] = '\'';  // primary stress, put before the vowel, not after
				skip=1;
			}
			if(p[0] == ',')
			{
				phbuf[ix++] = ',';  // secondary stress
				skip=1;
			}
		}

		p = phoneme_names[ph];  // look at this phoneme

		if(p != NULL)
		{
			strcpy(&phbuf[ix],p);
			ix += strlen(p);
		}
	}
	strcpy(&phbuf[ix],"]]");
	ix += 2;

	if(pW != NULL)
	{
		for(j=0; j<=ix; j++)
		{
			pW[j] = phbuf[j];
		}
	}
	return(strlen(phbuf));
}



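// Walk the SAPI fragment list and build the text passed to eSpeak.  Speak() calls
// this twice: first with pW_start == NULL just to measure how large TextBuf must be,
// then again to actually fill it.  Changes in volume, rate, pitch, range, emphasis
// and say-as mode are emitted as embedded commands (CTRL_EMBEDDED, a value, then a
// command letter), and bookmarks become CTRL_EMBEDDED <name-index> 'M'.  Returns the
// total length in characters; *n_text receives the number of fragments recorded in
// frag_offsets[].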
int CTTSEngObj::ProcessFragList(const SPVTEXTFRAG* pTextFragList, wchar_t *pW_start, ISpTTSEngineSite* pOutputSite, int *n_text)
{//============================================================================================================================

	int action;
	int control;
	wchar_t *pW;
	const SPVSTATE *state;
	unsigned int ix;
	unsigned int len;
	unsigned int total=0;
	char cmdbuf[50];
	wchar_t markbuf[32];

	int speed;
	int volume;
	int pitch;
	int range;
	int emphasis;
	int sayas;

	unsigned int text_offset = 0;

	frag_count = 0;
	frag_ix = 0;
	pW = pW_start;

	while(pTextFragList != NULL)
	{
		action = pTextFragList->State.eAction;
		control = pOutputSite->GetActions();
		len = pTextFragList->ulTextLen;


		if(control & SPVES_ABORT)
			break;

		CheckActions(pOutputSite);
		sayas = 0;
		state = &pTextFragList->State;

		switch(action)
		{
		case SPVA_SpellOut:
			sayas = 0x12;   // SAYAS_CHARS;  // drop through to SPVA_Speak
		case SPVA_Speak:
			text_offset = pTextFragList->ulTextSrcOffset;
			audio_offset = audio_latest;

#ifdef deleted
// attempt to recognise when JAWS is spelling, it doesn't use SPVA_SpellOut
			if((pW != NULL) && (*n_text == 1) && ((len == 1) || ((len==2) && (pTextFragList->pTextStart[1]==' '))))
			{
				// A single text fragment with one character. Speak as a character, not a word
				sayas = 0x11;
				gSayas = 0;
			}
#endif

			if(frag_count >= n_frag_offsets)
			{
				// use a temporary pointer so the old array is not lost if realloc() fails
				FRAG_OFFSET *new_offsets = (FRAG_OFFSET *)realloc(frag_offsets,sizeof(FRAG_OFFSET)*(frag_count+500));
				if(new_offsets != NULL)
				{
					frag_offsets = new_offsets;
					n_frag_offsets = frag_count+500;
				}
			}

			// first set the volume, rate, pitch
			volume = (state->Volume * master_volume)/100;
			speed = ConvertRate(state->RateAdj);
			pitch = ConvertPitch(state->PitchAdj.MiddleAdj);
			range = ConvertRange(state->PitchAdj.RangeAdj);
			emphasis = state->EmphAdj;
			if(emphasis != 0)
				emphasis = 3;

			len = 0;
			if(volume != gVolume)
			{
				sprintf(&cmdbuf[len],"%c%dA",CTRL_EMBEDDED,volume);
				len += strlen(&cmdbuf[len]);
			}
			if(speed != gSpeed)
			{
				sprintf(&cmdbuf[len],"%c%dS",CTRL_EMBEDDED,speed);
				len += strlen(&cmdbuf[len]);
			}
			if(pitch != gPitch)
			{
				sprintf(&cmdbuf[len],"%c%dP",CTRL_EMBEDDED,pitch);
				len += strlen(&cmdbuf[len]);
			}
			if(range != gRange)
			{
				sprintf(&cmdbuf[len],"%c%dR",CTRL_EMBEDDED,range);
				len += strlen(&cmdbuf[len]);
			}
			if(emphasis != gEmphasis)
			{
				sprintf(&cmdbuf[len],"%c%dF",CTRL_EMBEDDED,emphasis);
				len += strlen(&cmdbuf[len]);
			}
			if(sayas != gSayas)
			{
				sprintf(&cmdbuf[len],"%c%dY",CTRL_EMBEDDED,sayas);
				len += strlen(&cmdbuf[len]);
			}

			gVolume = volume;
			gSpeed = speed;
			gPitch = pitch;
			gRange = range;
			gEmphasis = emphasis;
			gSayas = sayas;

			total += (len + pTextFragList->ulTextLen);
			if(pTextFragList->ulTextLen > 0)
			{
				total++;
			}

			if(pW != NULL)
			{
				for(ix=0; ix<len; ix++)
				{
					*pW++ = cmdbuf[ix];
				}

				frag_offsets[frag_count].textix = text_offset;
				frag_offsets[frag_count].bufix = pW - pW_start;
				frag_offsets[frag_count].cmdlen = len;

				for(ix=0; ix<pTextFragList->ulTextLen; ix++)
				{
					*pW++ = pTextFragList->pTextStart[ix];
				}
				if(pTextFragList->ulTextLen > 0)
				{
					*pW++ = ' ';
				}
			}
			frag_count++;
			break;

		case SPVA_Bookmark:
			total += (2 + pTextFragList->ulTextLen);

			if(pW != NULL)
			{
				int index;

				for(ix=0; (ix < pTextFragList->ulTextLen) && (ix < 31); ix++)   // markbuf[] holds 32 wchars
				{
					markbuf[ix] = (char )pTextFragList->pTextStart[ix];
				}
				markbuf[ix] = 0;

				if((index = AddNameData((const char *)markbuf,1)) >= 0)
				{
					sprintf(cmdbuf,"%c%dM",CTRL_EMBEDDED,index);
					len = strlen(cmdbuf);
					for(ix=0; ix<len; ix++)
					{
						*pW++ = cmdbuf[ix];
					}
				}
			}
			break;

		case SPVA_Pronounce:
			len = WritePhonemes(state->pPhoneIds, pW);
			total += len;
			if(pW != NULL)
			{
				pW += len;   // advance by this fragment's length, not the running total
			}
			break;
		}


		pTextFragList = pTextFragList->pNext;
	}

	if(pW != NULL)
	{
		*pW = 0;
	}
	*n_text = frag_count;

	return(total);
}   // end of ProcessFragList



/*****************************************************************************
* CTTSEngObj::Speak *
*-------------------*
*   Description:
*       This is the primary method that SAPI calls to render text.
*-----------------------------------------------------------------------------
*   Input Parameters
*
*   pUser
*       Pointer to the current user profile object. This object contains
*       information such as which languages are being used, and it also
*       gives access to resources like the SAPI master lexicon object.
*
*   dwSpeakFlags
*       This is a set of flags used to control the behavior of the
*       SAPI voice object and the associated engine.
*
*   VoiceFmtIndex
*       Zero based index specifying the output format that should
*       be used during rendering.
*
*   pTextFragList
*       A linked list of text fragments to be rendered. There is
*       one fragment per XML state change. If the input text does
*       not contain any XML markup, there will only be a single fragment.
*
*   pOutputSite
*       The interface back to SAPI where all output audio samples and events are written.
*
*   Return Values
*       S_OK - This should be returned after successful rendering or if
*              rendering was interrupted because *pfContinue changed to FALSE.
*       E_INVALIDARG
*       E_OUTOFMEMORY
*
*****************************************************************************/
STDMETHODIMP CTTSEngObj::Speak( DWORD dwSpeakFlags,
                                REFGUID rguidFormatId,
                                const WAVEFORMATEX * pWaveFormatEx,
                                const SPVTEXTFRAG* pTextFragList,
                                ISpTTSEngineSite* pOutputSite )
{
    SPDBG_FUNC( "CTTSEngObj::Speak" );
    HRESULT hr = S_OK;

	unsigned int size;

	int xVolume;
	int xSpeed;
	int xPitch;
	int xRange;
	int xEmphasis;
	int xSayas;
	int punctuation;
	int n_text_frag=0;

    //--- Check args
    if( SP_IS_BAD_INTERFACE_PTR( pOutputSite ) ||
        SP_IS_BAD_READ_PTR( pTextFragList )  )
    {
        hr = E_INVALIDARG;
    }
    else
    {
		InitNamedata();

        //--- Init some vars
        m_pCurrFrag   = pTextFragList;
        m_pNextChar   = m_pCurrFrag->pTextStart;
        m_pEndChar    = m_pNextChar + m_pCurrFrag->ulTextLen;
        m_ullAudioOff = 0;

		m_OutputSite = pOutputSite;


		xVolume = gVolume;
		xSpeed = gSpeed;
		xPitch = gPitch;
		xRange = gRange;
		xEmphasis = gEmphasis;
		xSayas = gSayas;

		// find the size of the text buffer needed for this Speak() request
		size = ProcessFragList(pTextFragList,NULL,pOutputSite,&n_text_frag);

		gVolume = xVolume;
		gSpeed = xSpeed;
		gPitch = xPitch;
		gRange = xRange;
		gEmphasis = xEmphasis;
		gSayas = xSayas;

		punctuation = 0;
		if(dwSpeakFlags & SPF_NLP_SPEAK_PUNC)
			punctuation = 1;

		espeak_SetParameter(espeakPUNCTUATION,punctuation,0);

		size = (size + 50)*sizeof(wchar_t);

		if(size > gBufSize)
		{
			size += 1000;  // some extra so we don't need to realloc() again too often
			wchar_t *new_buf = (wchar_t *)realloc(TextBuf,size);
			if(new_buf == NULL)
			{
				gBufSize=0;
				return E_OUTOFMEMORY;   // was return(1); a COM method must return an HRESULT
			}
			TextBuf = new_buf;
			gBufSize = size;
		}

		audio_latest = 0;
		size = ProcessFragList(pTextFragList,TextBuf,pOutputSite,&n_text_frag);

		if(size > 0)
		{
			espeak_Synth(TextBuf,0,0,POS_CHARACTER,0,espeakCHARS_WCHAR | espeakKEEP_NAMEDATA | espeakPHONEMES,NULL,NULL);
		}
	}
    return hr;
} /* CTTSEngObj::Speak */





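// Poll the output site for real-time requests.  SAPI can ask for volume and rate
// changes while speech is being rendered; the new values are stored in
// master_volume / master_rate and picked up by the next text fragment.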
HRESULT CTTSEngObj::CheckActions( ISpTTSEngineSite* pOutputSite )
{//==============================================================
	int control;
	USHORT volume;
	long rate;

	control = pOutputSite->GetActions();

	if(control & SPVES_VOLUME)
	{
		if(pOutputSite->GetVolume(&volume) == S_OK)
		{
			master_volume = volume;
		}
	}
	if(control & SPVES_RATE)
	{
		if(pOutputSite->GetRate(&rate) == S_OK)
		{
			master_rate = rate;
		}
	}

	return(S_OK);
}  // end of CTTSEngObj::CheckActions



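// Report the output audio format to SAPI: 22.05 kHz 16-bit mono normally, or
// 16 kHz for an MBROLA voice.  Also sets srate, the number of audio bytes per
// 10 ms at that format, which SynthCallback() uses to turn event times in
// milliseconds into byte offsets.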
STDMETHODIMP CTTSEngObj::GetOutputFormat( const GUID * pTargetFormatId, const WAVEFORMATEX * pTargetWaveFormatEx,
                                          GUID * pDesiredFormatId, WAVEFORMATEX ** ppCoMemDesiredWaveFormatEx )
{//========================================================================
    SPDBG_FUNC( "CTTSEngObj::GetVoiceFormat" );
    HRESULT hr = S_OK;
	enum SPSTREAMFORMAT sample_rate = SPSF_22kHz16BitMono;

	srate = 441;
	if(espeak_GetParameter(espeakVOICETYPE,1) == 1)
	{
		srate = 320;
		sample_rate = SPSF_16kHz16BitMono;   // an mbrola voice
	}

    hr = SpConvertStreamFormatEnum(sample_rate, pDesiredFormatId, ppCoMemDesiredWaveFormatEx);

    return hr;
} /* CTTSEngObj::GetVoiceFormat */



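// Helper entry point (apparently exported from the DLL, not part of ISpTTSEngine):
// recompile the eSpeak pronunciation dictionary for the given voice, writing the
// compiler messages to path_log.  The dictionary source files are looked for in
// the installation directory recorded in path_install.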
int FAR PASCAL CompileDictionary(const char *voice, const char *path_log)
{//===========================================================
	FILE *f_log3;
	char fname[120];

	f_log3 = fopen(path_log,"w");
	sprintf(fname,"%s/",path_install);

	espeak_SetVoiceByName(voice);
	espeak_CompileDictionary(fname,f_log3,0);
	if(f_log3 != NULL)
		fclose(f_log3);   // fopen() may have failed, e.g. if path_log is not writable

	return(0);
}
