PageRenderTime 57ms CodeModel.GetById 17ms RepoModel.GetById 1ms app.codeStats 0ms

/tts/src/com/google/tts/TTSService.java

http://eyes-free.googlecode.com/
Java | 1546 lines | 1075 code | 138 blank | 333 comment | 219 complexity | d4d5af6571cf868a170c9f01027ae751 MD5 | raw file
Possible License(s): GPL-3.0, Apache-2.0
  1. /*
  2. * Copyright (C) 2009 Google Inc.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License"); you may not
  5. * use this file except in compliance with the License. You may obtain a copy of
  6. * the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. * License for the specific language governing permissions and limitations under
  14. * the License.
  15. */
  16. package com.google.tts;
  17. import android.app.Service;
  18. import android.content.Context;
  19. import android.content.Intent;
  20. import android.content.pm.ActivityInfo;
  21. import android.content.pm.PackageInfo;
  22. import android.content.pm.PackageManager;
  23. import android.content.pm.ResolveInfo;
  24. import android.content.pm.PackageManager.NameNotFoundException;
  25. import android.content.res.Resources;
  26. import android.media.AudioManager;
  27. import android.media.MediaPlayer;
  28. import android.media.MediaPlayer.OnCompletionListener;
  29. import android.net.Uri;
  30. import android.os.IBinder;
  31. import android.os.RemoteCallbackList;
  32. import android.os.RemoteException;
  33. import android.preference.PreferenceManager;
  34. import com.google.tts.ITTSCallback;
  35. import android.util.Log;
  36. import android.util.TypedValue;
  37. import java.io.File;
  38. import java.io.IOException;
  39. import java.io.InputStream;
  40. import java.util.ArrayList;
  41. import java.util.Arrays;
  42. import java.util.Enumeration;
  43. import java.util.HashMap;
  44. import java.util.List;
  45. import java.util.Locale;
  46. import java.util.Properties;
  47. import java.util.concurrent.locks.ReentrantLock;
  48. import java.util.concurrent.TimeUnit;
  49. import javax.xml.parsers.FactoryConfigurationError;
  50. /**
  51. * @hide Synthesizes speech from text. This is implemented as a service so that
  52. * other applications can call the TTS without needing to bundle the TTS
  53. * in the build.
  54. */
  55. public class TTSService extends Service implements OnCompletionListener {
  56. // This is for legacy purposes. The old TTS used languages of the format
  57. // "xx-rYY" (xx denotes language and YY denotes region).
  58. // This now needs to be mapped to Locale, which is of the format:
  59. // xxx-YYY-variant (xxx denotes language, YYY denotes country, and variant
  60. // is the name of the variant).
// Maps legacy "xx-rYY" language/region codes (old TTS API) to the
// ISO-639-3 / ISO-3166-3 style "xxx-YYY" locale strings the engines expect.
// Populated once at class-load time and never mutated afterwards.
static final HashMap<String, String> langRegionToLocale = new HashMap<String, String>();
static {
    langRegionToLocale.put("af", "afr");
    langRegionToLocale.put("bs", "bos");
    // Hong Kong Chinese maps to Cantonese, generic Chinese to Mandarin.
    langRegionToLocale.put("zh-rHK", "yue");
    langRegionToLocale.put("zh", "cmn");
    langRegionToLocale.put("hr", "hrv");
    // Both the incorrect legacy "cz" and the correct ISO "cs" map to Czech.
    langRegionToLocale.put("cz", "ces");
    langRegionToLocale.put("cs", "ces");
    langRegionToLocale.put("nl", "nld");
    langRegionToLocale.put("en", "eng");
    langRegionToLocale.put("en-rUS", "eng-USA");
    langRegionToLocale.put("en-rGB", "eng-GBR");
    langRegionToLocale.put("eo", "epo");
    langRegionToLocale.put("fi", "fin");
    langRegionToLocale.put("fr", "fra");
    langRegionToLocale.put("fr-rFR", "fra-FRA");
    langRegionToLocale.put("de", "deu");
    langRegionToLocale.put("de-rDE", "deu-DEU");
    langRegionToLocale.put("el", "ell");
    langRegionToLocale.put("hi", "hin");
    langRegionToLocale.put("hu", "hun");
    langRegionToLocale.put("is", "isl");
    langRegionToLocale.put("id", "ind");
    langRegionToLocale.put("it", "ita");
    langRegionToLocale.put("it-rIT", "ita-ITA");
    langRegionToLocale.put("ku", "kur");
    langRegionToLocale.put("la", "lat");
    langRegionToLocale.put("mk", "mkd");
    langRegionToLocale.put("no", "nor");
    langRegionToLocale.put("pl", "pol");
    langRegionToLocale.put("pt", "por");
    langRegionToLocale.put("ro", "ron");
    langRegionToLocale.put("ru", "rus");
    langRegionToLocale.put("sr", "srp");
    langRegionToLocale.put("sk", "slk");
    langRegionToLocale.put("es", "spa");
    langRegionToLocale.put("es-rES", "spa-ESP");
    langRegionToLocale.put("es-rMX", "spa-MEX");
    langRegionToLocale.put("sw", "swa");
    langRegionToLocale.put("sv", "swe");
    langRegionToLocale.put("ta", "tam");
    langRegionToLocale.put("tr", "tur");
    langRegionToLocale.put("vi", "vie");
    langRegionToLocale.put("cy", "cym");
}
  107. private static class SpeechItem {
  108. public static final int TEXT = 0;
  109. public static final int EARCON = 1;
  110. public static final int SILENCE = 2;
  111. public static final int TEXT_TO_FILE = 3;
  112. public String mText = "";
  113. public ArrayList<String> mParams = null;
  114. public int mType = TEXT;
  115. public long mDuration = 0;
  116. public String mFilename = null;
  117. public String mCallingApp = "";
  118. public SpeechItem(String source, String text, ArrayList<String> params, int itemType) {
  119. mText = text;
  120. mParams = params;
  121. mType = itemType;
  122. mCallingApp = source;
  123. }
  124. public SpeechItem(String source, long silenceTime, ArrayList<String> params) {
  125. mDuration = silenceTime;
  126. mParams = params;
  127. mType = SILENCE;
  128. mCallingApp = source;
  129. }
  130. public SpeechItem(String source, String text, ArrayList<String> params,
  131. int itemType, String filename) {
  132. mText = text;
  133. mParams = params;
  134. mType = itemType;
  135. mFilename = filename;
  136. mCallingApp = source;
  137. }
  138. }
  139. /**
  140. * Contains the information needed to access a sound resource; the name of
  141. * the package that contains the resource and the resID of the resource
  142. * within that package.
  143. */
  144. private static class SoundResource {
  145. public String mSourcePackageName = null;
  146. public int mResId = -1;
  147. public String mFilename = null;
  148. public SoundResource(String packageName, int id) {
  149. mSourcePackageName = packageName;
  150. mResId = id;
  151. mFilename = null;
  152. }
  153. public SoundResource(String file) {
  154. mSourcePackageName = null;
  155. mResId = -1;
  156. mFilename = file;
  157. }
  158. }
// If the speech queue is locked for more than 5 seconds, something has gone
// very wrong with processSpeechQueue.
private static final int SPEECHQUEUELOCK_TIMEOUT = 5000;
// Upper bounds accepted from callers; presumably mirror engine limits —
// TODO confirm against the synth engines.
private static final int MAX_SPEECH_ITEM_CHAR_LENGTH = 4000;
private static final int MAX_FILENAME_LENGTH = 250;
// TODO use the TTS stream type when available
private static final int DEFAULT_STREAM_TYPE = AudioManager.STREAM_MUSIC;
// Intent actions/categories for the original and the beta service APIs.
private static final String ACTION = "android.intent.action.USE_TTS";
private static final String CATEGORY = "android.intent.category.TTS";
private static final String BETA_ACTION = "com.google.intent.action.START_TTS_SERVICE_BETA";
private static final String BETA_CATEGORY = "com.google.intent.category.TTS_BETA";
private static final String PKGNAME = "android.tts";
// Change this to the system/lib path when in the framework
private static final String DEFAULT_SYNTH = "com.svox.pico";
protected static final String SERVICE_TAG = "TtsService";
// Callback lists for the beta API (list + per-app map) and the legacy API.
private final RemoteCallbackList<ITtsCallbackBeta> mCallbacks = new RemoteCallbackList<ITtsCallbackBeta>();
private HashMap<String, ITtsCallbackBeta> mCallbacksMap;
private final RemoteCallbackList<ITTSCallback> mCallbacksOld = new RemoteCallbackList<ITTSCallback>();
private boolean mIsSpeaking;
private boolean mSynthBusy;
// Pending utterances; guarded by speechQueueLock together with mCurrentSpeechItem.
private ArrayList<SpeechItem> mSpeechQueue;
// Text-to-sound substitution tables (earcons and legacy [text] substitutions).
private HashMap<String, SoundResource> mEarcons;
private HashMap<String, SoundResource> mUtterances;
private MediaPlayer mPlayer;
private SpeechItem mCurrentSpeechItem;
private HashMap<SpeechItem, Boolean> mKillList; // Used to ensure that
// in-flight synth calls
// are killed when stop is used.
private TTSService mSelf;
// lock for the speech queue (mSpeechQueue) and the current speech item
// (mCurrentSpeechItem)
private final ReentrantLock speechQueueLock = new ReentrantLock();
private final ReentrantLock synthesizerLock = new ReentrantLock();
// Native synthesizer proxy; static so at most one engine .so is loaded.
private static SynthProxyBeta sNativeSynth = null;
// Path of the engine .so currently loaded; "" means none yet.
private String currentSpeechEngineSOFile = "";
private boolean deprecatedKeepBlockingFlag = false;
  195. @Override
  196. public void onCreate() {
  197. super.onCreate();
  198. Log.v(SERVICE_TAG, "TtsService.onCreate()");
  199. // String soLibPath = "/data/data/com.google.tts/lib/libttspico.so";
  200. // Use this path when building in the framework:
  201. // setEngine("/system/lib/libttspico.so");
  202. // setEngine("/data/data/com.google.tts/lib/libespeakengine.so");
  203. // setEngine("/data/data/com.google.tts/lib/libttspico.so");
  204. // Also, switch to using the system settings in the framework.
  205. currentSpeechEngineSOFile = "";
  206. String preferredEngine = PreferenceManager.getDefaultSharedPreferences(this).getString(
  207. "tts_default_synth", DEFAULT_SYNTH);
  208. if (setEngine(preferredEngine) != TextToSpeechBeta.SUCCESS) {
  209. Log.e(SERVICE_TAG, "Unable to start up with " + preferredEngine
  210. + ". Falling back to the default TTS engine.");
  211. setEngine(DEFAULT_SYNTH);
  212. }
  213. mSelf = this;
  214. mIsSpeaking = false;
  215. mSynthBusy = false;
  216. mEarcons = new HashMap<String, SoundResource>();
  217. mUtterances = new HashMap<String, SoundResource>();
  218. mCallbacksMap = new HashMap<String, ITtsCallbackBeta>();
  219. mSpeechQueue = new ArrayList<SpeechItem>();
  220. mPlayer = null;
  221. mCurrentSpeechItem = null;
  222. mKillList = new HashMap<SpeechItem, Boolean>();
  223. setDefaultSettings();
  224. // Standalone library only - include a set of default earcons
  225. // and pre-recorded audio.
  226. // These are not in the framework due to concerns about the size.
  227. Resources res = getResources();
  228. InputStream fis = res.openRawResource(R.raw.soundsamples);
  229. try {
  230. Properties soundsamples = new Properties();
  231. soundsamples.load(fis);
  232. Enumeration<Object> textKeys = soundsamples.keys();
  233. while (textKeys.hasMoreElements()) {
  234. String text = textKeys.nextElement().toString();
  235. String name = "com.google.tts:raw/" + soundsamples.getProperty(text);
  236. TypedValue value = new TypedValue();
  237. getResources().getValue(name, value, false);
  238. mUtterances.put(text, new SoundResource(PKGNAME, value.resourceId));
  239. }
  240. } catch (FactoryConfigurationError e) {
  241. e.printStackTrace();
  242. } catch (IOException e) {
  243. e.printStackTrace();
  244. } catch (IllegalArgumentException e) {
  245. e.printStackTrace();
  246. } catch (SecurityException e) {
  247. e.printStackTrace();
  248. }
  249. // Deprecated - these should be earcons from now on!
  250. // Leave this here for one more version before removing it completely.
  251. mUtterances.put("[tock]", new SoundResource(PKGNAME, R.raw.tock_snd));
  252. mUtterances.put("[slnc]", new SoundResource(PKGNAME, R.raw.slnc_snd));
  253. mEarcons.put("[tock]", new SoundResource(PKGNAME, R.raw.tock_snd));
  254. mEarcons.put("[slnc]", new SoundResource(PKGNAME, R.raw.slnc_snd));
  255. Log.e(SERVICE_TAG, "onCreate completed.");
  256. }
  257. @Override
  258. public void onDestroy() {
  259. super.onDestroy();
  260. killAllUtterances();
  261. // Don't hog the media player
  262. cleanUpPlayer();
  263. if (sNativeSynth != null) {
  264. sNativeSynth.shutdown();
  265. }
  266. sNativeSynth = null;
  267. // Unregister all callbacks.
  268. mCallbacks.kill();
  269. mCallbacksOld.kill();
  270. Log.v(SERVICE_TAG, "onDestroy() completed");
  271. }
/**
 * Loads the native synthesizer binary for the given engine package,
 * replacing any currently loaded engine. Returns TextToSpeechBeta.SUCCESS
 * or TextToSpeechBeta.ERROR. Order-sensitive: the running synth is stopped
 * and shut down before the new one is constructed.
 */
private int setEngine(String enginePackageName) {
    // The user-chosen engine is ignored when "use defaults" is enforced.
    if (isDefaultEnforced()) {
        enginePackageName = getDefaultEngine();
    }
    // This is a hack to prevent the user from trying to run the Pico
    // engine if they are on Cupcake, since the Pico auto-install only works
    // on Donut or higher and no equivalent has been backported.
    int sdkInt = 4;
    try {
        sdkInt = Integer.parseInt(android.os.Build.VERSION.SDK);
    } catch (NumberFormatException e) {
        // Unparseable SDK string: fall through with the assumed value 4.
        Log.e(SERVICE_TAG, "Unable to parse SDK version: " + android.os.Build.VERSION.SDK);
    }
    if ((sdkInt < 4) && enginePackageName.equals("com.svox.pico")) {
        enginePackageName = "com.google.tts";
    }
    String soFilename = "";
    // The SVOX TTS is an exception to how the TTS packaging scheme works
    // because it is part of the system and not a 3rd party add-on; thus its
    // binary is actually located under /system/lib/
    if (enginePackageName.equals("com.svox.pico")) {
        // This is the path to use when this is integrated with the framework:
        // soFilename = "/system/lib/libttspico.so";
        if (sdkInt < 5) {
            soFilename = "/data/data/com.google.tts/lib/libttspico_4.so";
        } else {
            soFilename = "/data/data/com.google.tts/lib/libttspico.so";
        }
    } else {
        // Find the package.
        // This is the correct way to do this; but it won't work in Cupcake :(
        // Intent intent = new Intent("android.intent.action.START_TTS_ENGINE");
        // intent.setPackage(enginePackageName);
        // ResolveInfo[] enginesArray = new ResolveInfo[0];
        // PackageManager pm = getPackageManager();
        // List<ResolveInfo> resolveInfos = pm.queryIntentActivities(intent, 0);
        // if ((resolveInfos == null) || resolveInfos.isEmpty()) {
        //     Log.e(SERVICE_TAG, "Invalid TTS Engine Package: " + enginePackageName);
        //     return TextToSpeechBeta.ERROR;
        // }
        // enginesArray = resolveInfos.toArray(enginesArray);
        // // Generate the TTS .so filename from the package
        // ActivityInfo aInfo = enginesArray[0].activityInfo;
        // soFilename = aInfo.name.replace(aInfo.packageName + ".", "") + ".so";
        // soFilename = soFilename.toLowerCase();
        // soFilename = "/data/data/" + aInfo.packageName + "/lib/libtts" + soFilename;
        /* Start of hacky way of doing this */
        // Using a loop since we can't set the package name for the intent
        // in Cupcake: query every engine activity and match by package name.
        Intent intent = new Intent("android.intent.action.START_TTS_ENGINE");
        ResolveInfo[] enginesArray = new ResolveInfo[0];
        PackageManager pm = getPackageManager();
        List<ResolveInfo> resolveInfos = pm.queryIntentActivities(intent, 0);
        enginesArray = resolveInfos.toArray(enginesArray);
        ActivityInfo aInfo = null;
        for (int i = 0; i < enginesArray.length; i++) {
            if (enginesArray[i].activityInfo.packageName.equals(enginePackageName)) {
                aInfo = enginesArray[i].activityInfo;
                break;
            }
        }
        if (aInfo == null) {
            Log.e(SERVICE_TAG, "Invalid TTS Engine Package: " + enginePackageName);
            return TextToSpeechBeta.ERROR;
        }
        // Try to get a platform SDK specific binary, e.g. libtts<name>_4.so.
        if (sdkInt < 5) {
            sdkInt = 4;
        }
        soFilename = aInfo.name.replace(aInfo.packageName + ".", "") + "_" + sdkInt + ".so";
        soFilename = soFilename.toLowerCase();
        soFilename = "/data/data/" + aInfo.packageName + "/lib/libtts" + soFilename;
        File f = new File(soFilename);
        // If no such binary is available, default to a generic binary.
        if (!f.exists()) {
            soFilename = aInfo.name.replace(aInfo.packageName + ".", "") + ".so";
            soFilename = soFilename.toLowerCase();
            soFilename = "/data/data/" + aInfo.packageName + "/lib/libtts" + soFilename;
        }
        /* End of hacky way of doing this */
    }
    // Already running this exact binary: nothing to do.
    if (currentSpeechEngineSOFile.equals(soFilename)) {
        return TextToSpeechBeta.SUCCESS;
    }
    File f = new File(soFilename);
    if (!f.exists()) {
        Log.e(SERVICE_TAG, "Invalid TTS Binary: " + soFilename);
        return TextToSpeechBeta.ERROR;
    }
    if (sNativeSynth != null) {
        // Should really be a stopSync here, but that is not available in
        // Donut... sNativeSynth.stopSync();
        sNativeSynth.stop();
        sNativeSynth.shutdown();
        sNativeSynth = null;
    }
    sNativeSynth = new SynthProxyBeta(soFilename);
    currentSpeechEngineSOFile = soFilename;
    return TextToSpeechBeta.SUCCESS;
}
  384. private void setDefaultSettings() {
  385. setLanguage("", getDefaultLanguage(), getDefaultCountry(), getDefaultLocVariant());
  386. // speech rate
  387. setSpeechRate("", getDefaultRate());
  388. }
  389. private boolean isDefaultEnforced() {
  390. return (PreferenceManager.getDefaultSharedPreferences(this).getInt("toggle_use_default_tts_settings",
  391. 0) == 1);
  392. // In the framework, use the Secure settings instead by doing:
  393. //
  394. // return (android.provider.Settings.Secure.getInt(mResolver,
  395. // android.provider.Settings.Secure.TTS_USE_DEFAULTS,
  396. // TextToSpeechBeta.Engine.USE_DEFAULTS)
  397. // == 1 );
  398. }
  399. private String getDefaultEngine() {
  400. // In the framework, use the Secure settings instead by doing:
  401. // String defaultEngine = android.provider.Settings.Secure.getString(mResolver,
  402. // android.provider.Settings.Secure.TTS_DEFAULT_SYNTH);
  403. String defaultEngine = PreferenceManager.getDefaultSharedPreferences(this).getString("tts_default_synth", DEFAULT_SYNTH);
  404. if (defaultEngine == null) {
  405. return TextToSpeechBeta.Engine.DEFAULT_SYNTH;
  406. } else {
  407. return defaultEngine;
  408. }
  409. }
  410. private int getDefaultRate() {
  411. return Integer.parseInt(PreferenceManager.getDefaultSharedPreferences(this).getString(
  412. "rate_pref", "100"));
  413. // In the framework, use the Secure settings instead by doing:
  414. //
  415. // return android.provider.Settings.Secure.getInt(mResolver,
  416. // android.provider.Settings.Secure.TTS_DEFAULT_RATE,
  417. // TextToSpeechBeta.Engine.DEFAULT_RATE);
  418. }
  419. private int getDefaultPitch() {
  420. // Pitch is not user settable; the default pitch is always 100.
  421. return 100;
  422. }
  423. private String getDefaultLanguage() {
  424. return PreferenceManager.getDefaultSharedPreferences(this).getString(
  425. "tts_default_lang", Locale.getDefault().getISO3Language());
  426. // In the framework, use the Secure settings instead by doing:
  427. //
  428. // String defaultLang =
  429. // android.provider.Settings.Secure.getString(mResolver,
  430. // android.provider.Settings.Secure.TTS_DEFAULT_LANG);
  431. }
  432. private String getDefaultCountry() {
  433. return PreferenceManager.getDefaultSharedPreferences(this).getString(
  434. "tts_default_country", Locale.getDefault().getISO3Country());
  435. // In the framework, use the Secure settings instead by doing:
  436. //
  437. // String defaultCountry =
  438. // android.provider.Settings.Secure.getString(mResolver,
  439. // android.provider.Settings.Secure.TTS_DEFAULT_COUNTRY);
  440. }
  441. private String getDefaultLocVariant() {
  442. return PreferenceManager.getDefaultSharedPreferences(this).getString(
  443. "tts_default_variant", Locale.getDefault().getVariant());
  444. // In the framework, use the Secure settings instead by doing:
  445. //
  446. // String defaultVar =
  447. // android.provider.Settings.Secure.getString(mResolver,
  448. // android.provider.Settings.Secure.TTS_DEFAULT_VARIANT);
  449. }
  450. private int setSpeechRate(String callingApp, int rate) {
  451. int res = TextToSpeechBeta.ERROR;
  452. try {
  453. if (isDefaultEnforced()) {
  454. res = sNativeSynth.setSpeechRate(getDefaultRate());
  455. } else {
  456. res = sNativeSynth.setSpeechRate(rate);
  457. }
  458. } catch (NullPointerException e) {
  459. // synth will become null during onDestroy()
  460. res = TextToSpeechBeta.ERROR;
  461. }
  462. return res;
  463. }
  464. private int setPitch(String callingApp, int pitch) {
  465. int res = TextToSpeechBeta.ERROR;
  466. try {
  467. res = sNativeSynth.setPitch(pitch);
  468. } catch (NullPointerException e) {
  469. // synth will become null during onDestroy()
  470. res = TextToSpeechBeta.ERROR;
  471. }
  472. return res;
  473. }
  474. private int isLanguageAvailable(String lang, String country, String variant) {
  475. int res = TextToSpeechBeta.LANG_NOT_SUPPORTED;
  476. try {
  477. res = sNativeSynth.isLanguageAvailable(lang, country, variant);
  478. } catch (NullPointerException e) {
  479. // synth will become null during onDestroy()
  480. res = TextToSpeechBeta.LANG_NOT_SUPPORTED;
  481. }
  482. return res;
  483. }
  484. private String[] getLanguage() {
  485. try {
  486. return sNativeSynth.getLanguage();
  487. } catch (Exception e) {
  488. return null;
  489. }
  490. }
  491. private int setLanguage(String callingApp, String lang, String country, String variant) {
  492. Log
  493. .v(SERVICE_TAG, "TtsService.setLanguage(" + lang + ", " + country + ", " + variant
  494. + ")");
  495. int res = TextToSpeechBeta.ERROR;
  496. try {
  497. if (isDefaultEnforced()) {
  498. res = sNativeSynth.setLanguage(getDefaultLanguage(), getDefaultCountry(),
  499. getDefaultLocVariant());
  500. } else {
  501. res = sNativeSynth.setLanguage(lang, country, variant);
  502. }
  503. } catch (NullPointerException e) {
  504. // synth will become null during onDestroy()
  505. res = TextToSpeechBeta.ERROR;
  506. }
  507. return res;
  508. }
  509. /**
  510. * Adds a sound resource to the TTS.
  511. *
  512. * @param text The text that should be associated with the sound resource
  513. * @param packageName The name of the package which has the sound resource
  514. * @param resId The resource ID of the sound within its package
  515. */
  516. private void addSpeech(String callingApp, String text, String packageName, int resId) {
  517. mUtterances.put(text, new SoundResource(packageName, resId));
  518. }
  519. /**
  520. * Adds a sound resource to the TTS.
  521. *
  522. * @param text The text that should be associated with the sound resource
  523. * @param filename The filename of the sound resource. This must be a
  524. * complete path like: (/sdcard/mysounds/mysoundbite.mp3).
  525. */
  526. private void addSpeech(String callingApp, String text, String filename) {
  527. mUtterances.put(text, new SoundResource(filename));
  528. }
  529. /**
  530. * Adds a sound resource to the TTS as an earcon.
  531. *
  532. * @param earcon The text that should be associated with the sound resource
  533. * @param packageName The name of the package which has the sound resource
  534. * @param resId The resource ID of the sound within its package
  535. */
  536. private void addEarcon(String callingApp, String earcon, String packageName, int resId) {
  537. mEarcons.put(earcon, new SoundResource(packageName, resId));
  538. }
  539. /**
  540. * Adds a sound resource to the TTS as an earcon.
  541. *
  542. * @param earcon The text that should be associated with the sound resource
  543. * @param filename The filename of the sound resource. This must be a
  544. * complete path like: (/sdcard/mysounds/mysoundbite.mp3).
  545. */
  546. private void addEarcon(String callingApp, String earcon, String filename) {
  547. mEarcons.put(earcon, new SoundResource(filename));
  548. }
  549. /**
  550. * Speaks the given text using the specified queueing mode and parameters.
  551. *
  552. * @param text The text that should be spoken
  553. * @param queueMode TextToSpeech.TTS_QUEUE_FLUSH for no queue (interrupts
  554. * all previous utterances), TextToSpeech.TTS_QUEUE_ADD for
  555. * queued
  556. * @param params An ArrayList of parameters. This is not implemented for all
  557. * engines.
  558. */
  559. private int speak(String callingApp, String text, int queueMode, ArrayList<String> params) {
  560. Log.v(SERVICE_TAG, "TTS service received " + text);
  561. if (queueMode == TextToSpeechBeta.QUEUE_FLUSH) {
  562. stop(callingApp);
  563. } else if (queueMode == 2) {
  564. stopAll(callingApp);
  565. }
  566. mSpeechQueue.add(new SpeechItem(callingApp, text, params, SpeechItem.TEXT));
  567. if (!mIsSpeaking) {
  568. processSpeechQueue();
  569. }
  570. return TextToSpeechBeta.SUCCESS;
  571. }
  572. /**
  573. * Plays the earcon using the specified queueing mode and parameters.
  574. *
  575. * @param earcon The earcon that should be played
  576. * @param queueMode TextToSpeech.TTS_QUEUE_FLUSH for no queue (interrupts
  577. * all previous utterances), TextToSpeech.TTS_QUEUE_ADD for
  578. * queued
  579. * @param params An ArrayList of parameters. This is not implemented for all
  580. * engines.
  581. */
  582. private int playEarcon(String callingApp, String earcon, int queueMode, ArrayList<String> params) {
  583. if (queueMode == TextToSpeechBeta.QUEUE_FLUSH) {
  584. stop(callingApp);
  585. } else if (queueMode == 2) {
  586. stopAll(callingApp);
  587. }
  588. mSpeechQueue.add(new SpeechItem(callingApp, earcon, params, SpeechItem.EARCON));
  589. if (!mIsSpeaking) {
  590. processSpeechQueue();
  591. }
  592. return TextToSpeechBeta.SUCCESS;
  593. }
  594. /**
  595. * Stops all speech output and removes any utterances still in the queue for
  596. * the calling app.
  597. */
  598. private int stop(String callingApp) {
  599. int result = TextToSpeechBeta.ERROR;
  600. boolean speechQueueAvailable = false;
  601. try{
  602. speechQueueAvailable =
  603. speechQueueLock.tryLock(SPEECHQUEUELOCK_TIMEOUT, TimeUnit.MILLISECONDS);
  604. if (speechQueueAvailable) {
  605. Log.i(SERVICE_TAG, "Stopping");
  606. for (int i = mSpeechQueue.size() - 1; i > -1; i--){
  607. if (mSpeechQueue.get(i).mCallingApp.equals(callingApp)){
  608. mSpeechQueue.remove(i);
  609. }
  610. }
  611. if ((mCurrentSpeechItem != null) &&
  612. mCurrentSpeechItem.mCallingApp.equals(callingApp)) {
  613. try {
  614. result = sNativeSynth.stop();
  615. } catch (NullPointerException e1) {
  616. // synth will become null during onDestroy()
  617. result = TextToSpeechBeta.ERROR;
  618. }
  619. mKillList.put(mCurrentSpeechItem, true);
  620. if (mPlayer != null) {
  621. try {
  622. mPlayer.stop();
  623. } catch (IllegalStateException e) {
  624. // Do nothing, the player is already stopped.
  625. }
  626. }
  627. mIsSpeaking = false;
  628. mCurrentSpeechItem = null;
  629. } else {
  630. result = TextToSpeechBeta.SUCCESS;
  631. }
  632. Log.i(SERVICE_TAG, "Stopped");
  633. } else {
  634. Log.e(SERVICE_TAG, "TTS stop(): queue locked longer than expected");
  635. result = TextToSpeechBeta.ERROR;
  636. }
  637. } catch (InterruptedException e) {
  638. Log.e(SERVICE_TAG, "TTS stop: tryLock interrupted");
  639. e.printStackTrace();
  640. } finally {
  641. // This check is needed because finally will always run; even if the
  642. // method returns somewhere in the try block.
  643. if (speechQueueAvailable) {
  644. speechQueueLock.unlock();
  645. }
  646. return result;
  647. }
  648. }
  649. /**
  650. * Stops all speech output, both rendered to a file and directly spoken, and
  651. * removes any utterances still in the queue globally. Files that were being
  652. * written are deleted.
  653. */
  654. @SuppressWarnings("finally")
  655. private int killAllUtterances() {
  656. int result = TextToSpeechBeta.ERROR;
  657. boolean speechQueueAvailable = false;
  658. try {
  659. speechQueueAvailable = speechQueueLock.tryLock(SPEECHQUEUELOCK_TIMEOUT,
  660. TimeUnit.MILLISECONDS);
  661. if (speechQueueAvailable) {
  662. // remove every single entry in the speech queue
  663. mSpeechQueue.clear();
  664. // clear the current speech item
  665. if (mCurrentSpeechItem != null) {
  666. // Should be a stopSync - only using a stop for Donut
  667. // compatibility
  668. // result = sNativeSynth.stopSync();
  669. result = sNativeSynth.stop();
  670. mKillList.put(mCurrentSpeechItem, true);
  671. mIsSpeaking = false;
  672. // was the engine writing to a file?
  673. if (mCurrentSpeechItem.mType == SpeechItem.TEXT_TO_FILE) {
  674. // delete the file that was being written
  675. if (mCurrentSpeechItem.mFilename != null) {
  676. File tempFile = new File(mCurrentSpeechItem.mFilename);
  677. Log.v(SERVICE_TAG, "Leaving behind " + mCurrentSpeechItem.mFilename);
  678. if (tempFile.exists()) {
  679. Log.v(SERVICE_TAG, "About to delete "
  680. + mCurrentSpeechItem.mFilename);
  681. if (tempFile.delete()) {
  682. Log.v(SERVICE_TAG, "file successfully deleted");
  683. }
  684. }
  685. }
  686. }
  687. mCurrentSpeechItem = null;
  688. }
  689. } else {
  690. Log.e(SERVICE_TAG, "TTS killAllUtterances(): queue locked longer than expected");
  691. result = TextToSpeechBeta.ERROR;
  692. }
  693. } catch (InterruptedException e) {
  694. Log.e(SERVICE_TAG, "TTS killAllUtterances(): tryLock interrupted");
  695. result = TextToSpeechBeta.ERROR;
  696. } finally {
  697. // This check is needed because finally will always run, even if the
  698. // method returns somewhere in the try block.
  699. if (speechQueueAvailable) {
  700. speechQueueLock.unlock();
  701. }
  702. return result;
  703. }
  704. }
  705. /**
  706. * Stops all speech output and removes any utterances still in the queue
  707. * globally, except those intended to be synthesized to file.
  708. */
  709. private int stopAll(String callingApp) {
  710. int result = TextToSpeechBeta.ERROR;
  711. boolean speechQueueAvailable = false;
  712. try{
  713. speechQueueAvailable =
  714. speechQueueLock.tryLock(SPEECHQUEUELOCK_TIMEOUT, TimeUnit.MILLISECONDS);
  715. if (speechQueueAvailable) {
  716. for (int i = mSpeechQueue.size() - 1; i > -1; i--){
  717. if (mSpeechQueue.get(i).mType != SpeechItem.TEXT_TO_FILE){
  718. mSpeechQueue.remove(i);
  719. }
  720. }
  721. if ((mCurrentSpeechItem != null) &&
  722. ((mCurrentSpeechItem.mType != SpeechItem.TEXT_TO_FILE) ||
  723. mCurrentSpeechItem.mCallingApp.equals(callingApp))) {
  724. try {
  725. result = sNativeSynth.stop();
  726. } catch (NullPointerException e1) {
  727. // synth will become null during onDestroy()
  728. result = TextToSpeechBeta.ERROR;
  729. }
  730. mKillList.put(mCurrentSpeechItem, true);
  731. if (mPlayer != null) {
  732. try {
  733. mPlayer.stop();
  734. } catch (IllegalStateException e) {
  735. // Do nothing, the player is already stopped.
  736. }
  737. }
  738. mIsSpeaking = false;
  739. mCurrentSpeechItem = null;
  740. } else {
  741. result = TextToSpeechBeta.SUCCESS;
  742. }
  743. Log.i(SERVICE_TAG, "Stopped all");
  744. } else {
  745. Log.e(SERVICE_TAG, "TTS stopAll(): queue locked longer than expected");
  746. result = TextToSpeechBeta.ERROR;
  747. }
  748. } catch (InterruptedException e) {
  749. Log.e(SERVICE_TAG, "TTS stopAll: tryLock interrupted");
  750. e.printStackTrace();
  751. } finally {
  752. // This check is needed because finally will always run; even if the
  753. // method returns somewhere in the try block.
  754. if (speechQueueAvailable) {
  755. speechQueueLock.unlock();
  756. }
  757. return result;
  758. }
  759. }
  760. public void onCompletion(MediaPlayer arg0) {
  761. // mCurrentSpeechItem may become null if it is stopped at the same
  762. // time it completes.
  763. SpeechItem currentSpeechItemCopy = mCurrentSpeechItem;
  764. if (currentSpeechItemCopy != null) {
  765. String callingApp = currentSpeechItemCopy.mCallingApp;
  766. ArrayList<String> params = currentSpeechItemCopy.mParams;
  767. String utteranceId = "";
  768. if (params != null) {
  769. for (int i = 0; i < params.size() - 1; i = i + 2) {
  770. String param = params.get(i);
  771. if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_UTTERANCE_ID)) {
  772. utteranceId = params.get(i + 1);
  773. }
  774. }
  775. }
  776. if (utteranceId.length() > 0) {
  777. dispatchUtteranceCompletedCallback(utteranceId, callingApp);
  778. }
  779. }
  780. processSpeechQueue();
  781. }
  782. private int playSilence(String callingApp, long duration, int queueMode,
  783. ArrayList<String> params) {
  784. if (queueMode == TextToSpeechBeta.QUEUE_FLUSH) {
  785. stop(callingApp);
  786. }
  787. mSpeechQueue.add(new SpeechItem(callingApp, duration, params));
  788. if (!mIsSpeaking) {
  789. processSpeechQueue();
  790. }
  791. return TextToSpeechBeta.SUCCESS;
  792. }
  793. private void silence(final SpeechItem speechItem) {
  794. class SilenceThread implements Runnable {
  795. public void run() {
  796. String utteranceId = "";
  797. if (speechItem.mParams != null){
  798. for (int i = 0; i < speechItem.mParams.size() - 1; i = i + 2){
  799. String param = speechItem.mParams.get(i);
  800. if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_UTTERANCE_ID)){
  801. utteranceId = speechItem.mParams.get(i+1);
  802. }
  803. }
  804. }
  805. try {
  806. Thread.sleep(speechItem.mDuration);
  807. } catch (InterruptedException e) {
  808. e.printStackTrace();
  809. } finally {
  810. if (utteranceId.length() > 0){
  811. dispatchUtteranceCompletedCallback(utteranceId, speechItem.mCallingApp);
  812. }
  813. processSpeechQueue();
  814. }
  815. }
  816. }
  817. Thread slnc = (new Thread(new SilenceThread()));
  818. slnc.setPriority(Thread.MIN_PRIORITY);
  819. slnc.start();
  820. }
  821. boolean synthThreadBusy = false;
  822. private void speakInternalOnly(final SpeechItem speechItem) {
  823. Log.e(SERVICE_TAG, "Creating synth thread for: " + speechItem.mText);
  824. class SynthThread implements Runnable {
  825. public void run() {
  826. boolean synthAvailable = false;
  827. String utteranceId = "";
  828. try {
  829. synthAvailable = synthesizerLock.tryLock();
  830. if (!synthAvailable) {
  831. mSynthBusy = true;
  832. Thread.sleep(100);
  833. Thread synth = (new Thread(new SynthThread()));
  834. synth.start();
  835. mSynthBusy = false;
  836. return;
  837. }
  838. int streamType = DEFAULT_STREAM_TYPE;
  839. String language = "";
  840. String country = "";
  841. String variant = "";
  842. String speechRate = "";
  843. String engine = "";
  844. String pitch = "";
  845. if (speechItem.mParams != null){
  846. for (int i = 0; i < speechItem.mParams.size() - 1; i = i + 2){
  847. String param = speechItem.mParams.get(i);
  848. if (param != null) {
  849. if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_RATE)) {
  850. speechRate = speechItem.mParams.get(i + 1);
  851. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_LANGUAGE)) {
  852. language = speechItem.mParams.get(i + 1);
  853. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_COUNTRY)) {
  854. country = speechItem.mParams.get(i + 1);
  855. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_VARIANT)) {
  856. variant = speechItem.mParams.get(i + 1);
  857. } else if (param
  858. .equals(TextToSpeechBeta.Engine.KEY_PARAM_UTTERANCE_ID)) {
  859. utteranceId = speechItem.mParams.get(i + 1);
  860. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_STREAM)) {
  861. try {
  862. streamType = Integer
  863. .parseInt(speechItem.mParams.get(i + 1));
  864. } catch (NumberFormatException e) {
  865. streamType = DEFAULT_STREAM_TYPE;
  866. }
  867. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_ENGINE)) {
  868. engine = speechItem.mParams.get(i + 1);
  869. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_PITCH)) {
  870. pitch = speechItem.mParams.get(i + 1);
  871. }
  872. }
  873. }
  874. }
  875. // Only do the synthesis if it has not been killed by a subsequent utterance.
  876. if (mKillList.get(speechItem) == null) {
  877. if (engine.length() > 0) {
  878. setEngine(engine);
  879. } else {
  880. setEngine(getDefaultEngine());
  881. }
  882. if (language.length() > 0){
  883. setLanguage("", language, country, variant);
  884. } else {
  885. setLanguage("", getDefaultLanguage(), getDefaultCountry(),
  886. getDefaultLocVariant());
  887. }
  888. if (speechRate.length() > 0){
  889. setSpeechRate("", Integer.parseInt(speechRate));
  890. } else {
  891. setSpeechRate("", getDefaultRate());
  892. }
  893. if (pitch.length() > 0){
  894. setPitch("", Integer.parseInt(pitch));
  895. } else {
  896. setPitch("", getDefaultPitch());
  897. }
  898. try {
  899. sNativeSynth.speak(speechItem.mText, streamType);
  900. } catch (NullPointerException e) {
  901. // synth will become null during onDestroy()
  902. Log.v(SERVICE_TAG, " null synth, can't speak");
  903. }
  904. }
  905. } catch (InterruptedException e) {
  906. Log.e(SERVICE_TAG, "TTS speakInternalOnly(): tryLock interrupted");
  907. e.printStackTrace();
  908. } finally {
  909. // This check is needed because finally will always run;
  910. // even if the
  911. // method returns somewhere in the try block.
  912. if (utteranceId.length() > 0){
  913. dispatchUtteranceCompletedCallback(utteranceId, speechItem.mCallingApp);
  914. }
  915. if (synthAvailable) {
  916. synthesizerLock.unlock();
  917. processSpeechQueue();
  918. }
  919. }
  920. }
  921. }
  922. Thread synth = (new Thread(new SynthThread()));
  923. synth.setPriority(Thread.MAX_PRIORITY);
  924. synth.start();
  925. }
  926. private void synthToFileInternalOnly(final SpeechItem speechItem) {
  927. class SynthThread implements Runnable {
  928. public void run() {
  929. boolean synthAvailable = false;
  930. String utteranceId = "";
  931. Log.i(SERVICE_TAG, "Synthesizing to " + speechItem.mFilename);
  932. try {
  933. synthAvailable = synthesizerLock.tryLock();
  934. if (!synthAvailable) {
  935. synchronized (this) {
  936. mSynthBusy = true;
  937. }
  938. Thread.sleep(100);
  939. Thread synth = (new Thread(new SynthThread()));
  940. // synth.setPriority(Thread.MIN_PRIORITY);
  941. synth.start();
  942. synchronized (this) {
  943. mSynthBusy = false;
  944. }
  945. return;
  946. }
  947. String language = "";
  948. String country = "";
  949. String variant = "";
  950. String speechRate = "";
  951. String engine = "";
  952. String pitch = "";
  953. if (speechItem.mParams != null){
  954. for (int i = 0; i < speechItem.mParams.size() - 1; i = i + 2){
  955. String param = speechItem.mParams.get(i);
  956. if (param != null) {
  957. if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_RATE)) {
  958. speechRate = speechItem.mParams.get(i + 1);
  959. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_LANGUAGE)) {
  960. language = speechItem.mParams.get(i + 1);
  961. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_COUNTRY)) {
  962. country = speechItem.mParams.get(i + 1);
  963. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_VARIANT)) {
  964. variant = speechItem.mParams.get(i + 1);
  965. } else if (param
  966. .equals(TextToSpeechBeta.Engine.KEY_PARAM_UTTERANCE_ID)) {
  967. utteranceId = speechItem.mParams.get(i + 1);
  968. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_ENGINE)) {
  969. engine = speechItem.mParams.get(i + 1);
  970. } else if (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_PITCH)) {
  971. pitch = speechItem.mParams.get(i + 1);
  972. }
  973. }
  974. }
  975. }
  976. // Only do the synthesis if it has not been killed by a subsequent utterance.
  977. if (mKillList.get(speechItem) == null){
  978. if (engine.length() > 0) {
  979. setEngine(engine);
  980. } else {
  981. setEngine(getDefaultEngine());
  982. }
  983. if (language.length() > 0){
  984. setLanguage("", language, country, variant);
  985. } else {
  986. setLanguage("", getDefaultLanguage(), getDefaultCountry(),
  987. getDefaultLocVariant());
  988. }
  989. if (speechRate.length() > 0){
  990. setSpeechRate("", Integer.parseInt(speechRate));
  991. } else {
  992. setSpeechRate("", getDefaultRate());
  993. }
  994. if (pitch.length() > 0){
  995. setPitch("", Integer.parseInt(pitch));
  996. } else {
  997. setPitch("", getDefaultPitch());
  998. }
  999. try {
  1000. sNativeSynth.synthesizeToFile(speechItem.mText, speechItem.mFilename);
  1001. } catch (NullPointerException e) {
  1002. // synth will become null during onDestroy()
  1003. Log.v(SERVICE_TAG, " null synth, can't synthesize to file");
  1004. }
  1005. }
  1006. } catch (InterruptedException e) {
  1007. Log.e(SERVICE_TAG, "TTS synthToFileInternalOnly(): tryLock interrupted");
  1008. e.printStackTrace();
  1009. } finally {
  1010. // This check is needed because finally will always run;
  1011. // even if the
  1012. // method returns somewhere in the try block.
  1013. if (utteranceId.length() > 0){
  1014. dispatchUtteranceCompletedCallback(utteranceId, speechItem.mCallingApp);
  1015. }
  1016. if (synthAvailable) {
  1017. synthesizerLock.unlock();
  1018. processSpeechQueue();
  1019. }
  1020. deprecatedKeepBlockingFlag = false;
  1021. }
  1022. }
  1023. }
  1024. Thread synth = (new Thread(new SynthThread()));
  1025. synth.setPriority(Thread.MAX_PRIORITY);
  1026. synth.start();
  1027. }
  1028. private SoundResource getSoundResource(SpeechItem speechItem) {
  1029. SoundResource sr = null;
  1030. String text = speechItem.mText;
  1031. if (speechItem.mType == SpeechItem.SILENCE) {
  1032. // Do nothing if this is just silence
  1033. } else if (speechItem.mType == SpeechItem.EARCON) {
  1034. sr = mEarcons.get(text);
  1035. } else {
  1036. sr = mUtterances.get(text);
  1037. }
  1038. return sr;
  1039. }
  1040. private void broadcastTtsQueueProcessingCompleted() {
  1041. Intent i = new Intent(TextToSpeechBeta.ACTION_TTS_QUEUE_PROCESSING_COMPLETED);
  1042. sendBroadcast(i);
  1043. }
/**
 * Notifies the beta callback registered for {@code packageName} that the
 * utterance with {@code utteranceId} has completed. For backwards
 * compatibility, every legacy (pre-beta) callback is also pinged first via
 * {@code markReached("")}.
 *
 * @param utteranceId the ID supplied with the original request
 * @param packageName the package whose registered callback should fire
 */
private void dispatchUtteranceCompletedCallback(String utteranceId, String packageName) {
    /* Legacy support for TTS */
    final int oldN = mCallbacksOld.beginBroadcast();
    for (int i = 0; i < oldN; i++) {
        try {
            mCallbacksOld.getBroadcastItem(i).markReached("");
        } catch (RemoteException e) {
            // The RemoteCallbackList will take care of removing
            // the dead object for us.
        }
    }
    try {
        mCallbacksOld.finishBroadcast();
    } catch (IllegalStateException e) {
        // May get an illegal state exception here if there is only
        // one app running and it is trying to quit on completion.
        // This is the exact scenario triggered by MakeBagel
        return;
    }
    /* End of legacy support for TTS */
    ITtsCallbackBeta cb = mCallbacksMap.get(packageName);
    if (cb == null) {
        return;
    }
    Log.v(SERVICE_TAG, "TTS callback: dispatch started");
    // Broadcast to all clients the new value.
    // NOTE(review): beginBroadcast/finishBroadcast bracket the dispatch, but
    // only the single callback mapped to packageName is actually invoked --
    // the loop over N items seen in the legacy path is absent here.
    final int N = mCallbacks.beginBroadcast();
    try {
        cb.utteranceCompleted(utteranceId);
    } catch (RemoteException e) {
        // The RemoteCallbackList will take care of removing
        // the dead object for us.
    }
    mCallbacks.finishBroadcast();
    Log.v(SERVICE_TAG, "TTS callback: dispatch completed to " + N);
}
  1080. private SpeechItem splitCurrentTextIfNeeded(SpeechItem currentSpeechItem){
  1081. if (currentSpeechItem.mText.length() < MAX_SPEECH_ITEM_CHAR_LENGTH){
  1082. return currentSpeechItem;
  1083. } else {
  1084. String callingApp = currentSpeechItem.mCallingApp;
  1085. ArrayList<SpeechItem> splitItems = new ArrayList<SpeechItem>();
  1086. int start = 0;
  1087. int end = start + MAX_SPEECH_ITEM_CHAR_LENGTH - 1;
  1088. String splitText;
  1089. SpeechItem splitItem;
  1090. while (end < currentSpeechItem.mText.length()){
  1091. splitText = currentSpeechItem.mText.substring(start, end);
  1092. splitItem = new SpeechItem(callingApp, splitText, null, SpeechItem.TEXT);
  1093. splitItems.add(splitItem);
  1094. start = end;
  1095. end = start + MAX_SPEECH_ITEM_CHAR_LENGTH - 1;
  1096. }
  1097. splitText = currentSpeechItem.mText.substring(start);
  1098. splitItem = new SpeechItem(callingApp, splitText, null, SpeechItem.TEXT);
  1099. splitItems.add(splitItem);
  1100. mSpeechQueue.remove(0);
  1101. for (int i = splitItems.size() - 1; i >= 0; i--){
  1102. mSpeechQueue.add(0, splitItems.get(i));
  1103. }
  1104. return mSpeechQueue.get(0);
  1105. }
  1106. }
/**
 * Pops the next item off the speech queue and starts it: text items go to
 * the synthesizer, text-to-file items to file synthesis, items with a
 * registered sound resource to a MediaPlayer, and everything else (silence
 * or a missing earcon) to the silence handler. Re-entered by the synth
 * threads and onCompletion() to drain the queue; bails out early when a
 * synth thread is already pending or the queue lock cannot be acquired.
 */
private void processSpeechQueue() {
    boolean speechQueueAvailable = false;
    synchronized (this) {
        if (mSynthBusy) {
            // There is already a synth thread waiting to run.
            return;
        }
    }
    try {
        speechQueueAvailable =
                speechQueueLock.tryLock(SPEECHQUEUELOCK_TIMEOUT, TimeUnit.MILLISECONDS);
        if (!speechQueueAvailable) {
            Log.e(SERVICE_TAG, "processSpeechQueue - Speech queue is unavailable.");
            return;
        }
        if (mSpeechQueue.size() < 1) {
            // Queue drained: reset state and announce completion.
            mIsSpeaking = false;
            mKillList.clear();
            broadcastTtsQueueProcessingCompleted();
            return;
        }
        mCurrentSpeechItem = mSpeechQueue.get(0);
        mIsSpeaking = true;
        SoundResource sr = getSoundResource(mCurrentSpeechItem);
        // Synth speech as needed - synthesizer should call
        // processSpeechQueue to continue running the queue
        Log.v(SERVICE_TAG, "TTS processing: " + mCurrentSpeechItem.mText);
        if (sr == null) {
            if (mCurrentSpeechItem.mType == SpeechItem.TEXT) {
                mCurrentSpeechItem = splitCurrentTextIfNeeded(mCurrentSpeechItem);
                speakInternalOnly(mCurrentSpeechItem);
            } else if (mCurrentSpeechItem.mType == SpeechItem.TEXT_TO_FILE) {
                synthToFileInternalOnly(mCurrentSpeechItem);
            } else {
                // This is either silence or an earcon that was missing
                silence(mCurrentSpeechItem);
            }
        } else {
            cleanUpPlayer();
            // NOTE(review): reference comparison (==/!=) on the package name
            // strings below -- works only if both sides are interned; confirm.
            if (sr.mSourcePackageName == PKGNAME) {
                // Utterance is part of the TTS library
                mPlayer = MediaPlayer.create(this, sr.mResId);
            } else if (sr.mSourcePackageName != null) {
                // Utterance is part of the app calling the library
                Context ctx;
                try {
                    ctx = this.createPackageContext(sr.mSourcePackageName, 0);
                } catch (NameNotFoundException e) {
                    e.printStackTrace();
                    mSpeechQueue.remove(0); // Remove it from the queue and
                    // move on
                    mIsSpeaking = false;
                    return;
                }
                mPlayer = MediaPlayer.create(ctx, sr.mResId);
            } else {
                // Utterance is coming from a file
                mPlayer = MediaPlayer.create(this, Uri.parse(sr.mFilename));
            }
            // Check if Media Server is dead; if it is, clear the queue and
            // give up for now - hopefully, it will recover itself.
            if (mPlayer == null) {
                mSpeechQueue.clear();
                mIsSpeaking = false;
                return;
            }
            mPlayer.setOnCompletionListener(this);
            try {
                mPlayer.setAudioStreamType(getStreamTypeFromParams(mCurrentSpeechItem.mParams));
                mPlayer.start();
            } catch (IllegalStateException e) {
                mSpeechQueue.clear();
                mIsSpeaking = false;
                cleanUpPlayer();
                return;
            }
        }
        if (mSpeechQueue.size() > 0) {
            mSpeechQueue.remove(0);
        }
    } catch (InterruptedException e) {
        Log.e(SERVICE_TAG, "TTS processSpeechQueue: tryLock interrupted");
        e.printStackTrace();
    } finally {
        // This check is needed because finally will always run; even if the
        // method returns somewhere in the try block.
        if (speechQueueAvailable) {
            speechQueueLock.unlock();
        }
    }
}
  1198. private int getStreamTypeFromParams(ArrayList<String> paramList) {
  1199. int streamType = DEFAULT_STREAM_TYPE;
  1200. if (paramList == null) {
  1201. return streamType;
  1202. }
  1203. for (int i = 0; i < paramList.size() - 1; i = i + 2) {
  1204. String param = paramList.get(i);
  1205. if ((param != null) && (param.equals(TextToSpeechBeta.Engine.KEY_PARAM_STREAM))) {
  1206. try {
  1207. streamType = Integer.parseInt(paramList.get(i + 1));
  1208. } catch (NumberFormatException e) {
  1209. streamType = DEFAULT_STREAM_TYPE;
  1210. }
  1211. }
  1212. }
  1213. return streamType;
  1214. }
  1215. private void cleanUpPlayer() {
  1216. if (mPlayer != null) {
  1217. mPlayer.release();
  1218. mPlayer = null;
  1219. }
  1220. }
  1221. /**
  1222. * Synthesizes the given text to a file using the specified parameters.
  1223. *
  1224. * @param text
  1225. * The String of text that should be synthesized
  1226. * @param params
  1227. * An ArrayList of parameters. The first element of this array
  1228. * controls the type of voice to use.
  1229. * @param filename
  1230. * The string that gives the full output filename; it should be
  1231. * something like "/sdcard/myappsounds/mysound.wav".
  1232. * @return A boolean that indicates if the synthesis can be started
  1233. */
  1234. private boolean synthesizeToFile(String callingApp, String text, ArrayList<String> params,
  1235. String filename) {
  1236. // Don't allow a filename that is too long
  1237. if (filename.length() > MAX_FILENAME_LENGTH) {
  1238. return false;
  1239. }
  1240. // Don't allow anything longer than the max text length; since this
  1241. // is synthing to a file, don't even bother splitting it.
  1242. if (text.length() >= MAX_SPEECH_ITEM_CHAR_LENGTH) {
  1243. return false;
  1244. }
  1245. // Check that the output file can be created
  1246. try {
  1247. File tempFile = new File(filename);
  1248. if (tempFile.exists()) {
  1249. Log.v("TtsService", "File " + filename + " exists, deleting.");
  1250. tempFile.delete();
  1251. }
  1252. if (!tempFile.createNewFile()) {
  1253. Log.e("TtsService", "Unable to synthesize to file: can't create " + filename);
  1254. return false;
  1255. }
  1256. tempFile.delete();
  1257. } catch (IOException e) {
  1258. Log.e("TtsService", "Can't create " + filename + " due to exception " + e);
  1259. return false;
  1260. }
  1261. mSpeechQueue.add(new SpeechItem(callingApp, text, params, SpeechItem.TEXT_TO_FILE, filename));
  1262. if (!mIsSpeaking) {
  1263. processSpeechQueue();
  1264. }
  1265. return true;
  1266. }
  1267. @Override
  1268. public IBinder onBind(Intent intent) {
  1269. if (ACTION.equals(intent.getAction())) {
  1270. for (String category : intent.getCategories()) {
  1271. if (category.equals(CATEGORY)) {
  1272. return mBinderOld;
  1273. }
  1274. }
  1275. }
  1276. if (BETA_ACTION.equals(intent.getAction())) {
  1277. for (String category : intent.getCategories()) {
  1278. if (category.equals(BETA_CATEGORY)) {
  1279. return mBinder;
  1280. }
  1281. }
  1282. }
  1283. return null;
  1284. }
  1285. private final ITtsBeta.Stub mBinder = new ITtsBeta.Stub() {
  1286. public int registerCallback(String packageName, ITtsCallbackBeta cb) {
  1287. if (cb != null) {
  1288. mCallbacks.register(cb);
  1289. mCallbacksMap.put(packageName, cb);
  1290. return TextToSpeechBeta.SUCCESS;
  1291. }
  1292. return TextToSpeechBeta.ERROR;
  1293. }
  1294. public int unregisterCallback(String packageName, ITtsCallbackBeta cb) {
  1295. if (cb != null) {
  1296. mCallbacksMap.remove(packageName);
  1297. mCallbacks.unregister(cb);
  1298. return TextToSpeechBeta.SUCCESS;
  1299. }
  1300. return TextToSpeechBeta.ERROR;
  1301. }
  1302. /**
  1303. * Speaks the given text using the specified queueing mode and
  1304. * parameters.
  1305. *
  1306. * @param text The text that should be spoken
  1307. * @param queueMode TextToSpeech.TTS_QUEUE_FLUSH for no queue
  1308. * (interrupts all previous utterances)
  1309. * TextToSpeech.TTS_QUEUE_ADD for queued
  1310. * @param params An ArrayList of parameters. The first element of this
  1311. * array controls the type of voice to use.
  1312. */
  1313. public int speak(String callingApp, String text, int queueMode, String[] params) {
  1314. ArrayList<String> speakingParams = new ArrayList<String>();
  1315. if (params != null) {
  1316. speakingParams = new ArrayList<String>(Arrays.asList(params));
  1317. }
  1318. return mSelf.speak(callingApp, text, queueMode, speakingParams);
  1319. }
  1320. /**
  1321. * Plays the earcon using the specified queueing mode and parameters.
  1322. *
  1323. * @param earcon The earcon that should be played
  1324. * @param queueMode TextToSpeech.TTS_QUEUE_FLUSH for no queue
  1325. * (interrupts all previous utterances)
  1326. * TextToSpeech.TTS_QUEUE_ADD for queued
  1327. * @param params An ArrayList of parameters.
  1328. */
  1329. public int playEarcon(String callingApp, String earcon, int queueMode, String[] params) {
  1330. ArrayList<String> speakingParams = new ArrayList<String>();
  1331. if (params != null) {
  1332. speakingParams = new ArrayList<String>(Arrays.asList(params));
  1333. }
  1334. return mSelf.playEarcon(callingApp, earcon, queueMode, speakingParams);
  1335. }
  1336. /**
  1337. * Plays the silence using the specified queueing mode and parameters.
  1338. *
  1339. * @param duration The duration of the silence that should be played
  1340. * @param queueMode TextToSpeech.TTS_QUEUE_FLUSH for no queue
  1341. * (interrupts all previous utterances)
  1342. * TextToSpeech.TTS_QUEUE_ADD for queued
  1343. * @param params An ArrayList of parameters.
  1344. */
  1345. public int playSilence(String callingApp, long duration, int queueMode, String[] params) {
  1346. ArrayList<String> speakingParams = new ArrayList<String>();
  1347. if (params != null) {
  1348. speakingParams = new ArrayList<String>(Arrays.asList(params));
  1349. }
  1350. return mSelf.playSilence(callingApp, duration, queueMode, speakingParams);
  1351. }
/**
 * Stops all speech output and removes any utterances still in the
 * queue. Delegates to the enclosing service's stop().
 *
 * @param callingApp the package name of the calling app
 * @return TextToSpeechBeta.SUCCESS or TextToSpeechBeta.ERROR
 */
public int stop(String callingApp) {
    return mSelf.stop(callingApp);
}
/**
 * Returns whether or not the TTS is speaking.
 *
 * NOTE(review): the condition also requires the queue to be empty
 * (size() &lt; 1), so this returns true only while the final queued item is
 * being spoken -- which reads oddly against the method name. Confirm
 * against callers before changing.
 *
 * @return Boolean to indicate whether or not the TTS is speaking
 */
public boolean isSpeaking() {
    return (mSelf.mIsSpeaking && (mSpeechQueue.size() < 1));
}
/**
 * Adds a sound resource to the TTS. Delegates to the enclosing service.
 *
 * @param callingApp the package name of the calling app
 * @param text the text that should be associated with the sound resource
 * @param packageName the name of the package which has the sound resource
 * @param resId the resource ID of the sound within its package
 */
public void addSpeech(String callingApp, String text, String packageName, int resId) {
    mSelf.addSpeech(callingApp, text, packageName, resId);
}
/**
 * Adds a sound file to the TTS. Delegates to the enclosing service's
 * filename-based addSpeech overload.
 *
 * @param callingApp the package name of the calling app
 * @param text the text that should be associated with the sound resource
 * @param filename the filename of the sound resource; this must be a
 *            complete path like: (/sdcard/mysounds/mysoundbite.mp3)
 */
public void addSpeechFile(String callingApp, String text, String filename) {
    mSelf.addSpeech(callingApp, text, filename);
}
/**
 * Adds a sound resource to the TTS as an earcon. Delegates to the
 * enclosing service.
 *
 * @param callingApp the package name of the calling app
 * @param earcon the text that should be associated with the sound resource
 * @param packageName the name of the package which has the sound resource
 * @param resId the resource ID of the sound within its package
 */
public void addEarcon(String callingApp, String earcon, String packageName, int resId) {
    mSelf.addEarcon(callingApp, earcon, packageName, resId);
}
  1402. /**
  1403. * Adds a sound resource to the TTS as an earcon.
  1404. *
  1405. * @param earcon The text that should be associated with the sound
  1406. * resource
  1407. * @param filename The filename of the sound resource. This must be a
  1408. * c