/DynaBERT/transformers/tokenization_utils.py

https://github.com/huawei-noah/Pretrained-Language-Model · Python

# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import logging
import os
import json
import six
import copy
from io import open

from .file_utils import cached_path, is_tf_available, is_torch_available

if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch

logger = logging.getLogger(__name__)

SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json'
ADDED_TOKENS_FILE = 'added_tokens.json'
TOKENIZER_CONFIG_FILE = 'tokenizer_config.json'

class PreTrainedTokenizer(object):
    """ Base class for all tokenizers.

    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.

    This class also contains the added tokens in a unified way on top of all tokenizers, so we don't
    have to handle the specific vocabulary augmentation methods of the various underlying
    dictionary structures (BPE, sentencepiece...).

    Class attributes (overridden by derived classes):

        - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each
          vocabulary file required by the model, and as associated values, the filename for saving the
          associated file (string).
        - ``pretrained_vocab_files_map``: a python ``dict of dict``, the high-level keys being the
          ``__init__`` keyword name of each vocabulary file required by the model, the low-level keys
          being the `short-cut-names` (string) of the pretrained models with, as associated values,
          the `url` (string) to the associated pretrained vocabulary file.
        - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of
          the pretrained models, and as associated values, the maximum length of the sequence inputs
          of this model, or None if the model has no maximum input size.
        - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names`
          (string) of the pretrained models, and as associated values, a dictionary of specific
          arguments to pass to the ``__init__`` method of the tokenizer class for this pretrained model
          when loading the tokenizer with the ``from_pretrained()`` method.

    Parameters:

        - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id``
        - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id``
        - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id``
        - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id``
        - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id``
        - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id``
        - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
        - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensures they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
    """
    vocab_files_names = {}
    pretrained_vocab_files_map = {}
    pretrained_init_configuration = {}
    max_model_input_sizes = {}

    SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token",
                                 "pad_token", "cls_token", "mask_token",
                                 "additional_special_tokens"]

    @property
    def bos_token(self):
        """ Beginning of sentence token (string). Log an error if used while not having been set. """
        if self._bos_token is None:
            logger.error("Using bos_token, but it is not set yet.")
        return self._bos_token

    @property
    def eos_token(self):
        """ End of sentence token (string). Log an error if used while not having been set. """
        if self._eos_token is None:
            logger.error("Using eos_token, but it is not set yet.")
        return self._eos_token

    @property
    def unk_token(self):
        """ Unknown token (string). Log an error if used while not having been set. """
        if self._unk_token is None:
            logger.error("Using unk_token, but it is not set yet.")
        return self._unk_token

    @property
    def sep_token(self):
        """ Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        if self._sep_token is None:
            logger.error("Using sep_token, but it is not set yet.")
        return self._sep_token

    @property
    def pad_token(self):
        """ Padding token (string). Log an error if used while not having been set. """
        if self._pad_token is None:
            logger.error("Using pad_token, but it is not set yet.")
        return self._pad_token

    @property
    def cls_token(self):
        """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        if self._cls_token is None:
            logger.error("Using cls_token, but it is not set yet.")
        return self._cls_token

    @property
    def mask_token(self):
        """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        if self._mask_token is None:
            logger.error("Using mask_token, but it is not set yet.")
        return self._mask_token

    @property
    def additional_special_tokens(self):
        """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
        if self._additional_special_tokens is None:
            logger.error("Using additional_special_tokens, but it is not set yet.")
        return self._additional_special_tokens

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    @property
    def bos_token_id(self):
        """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.bos_token)

    @property
    def eos_token_id(self):
        """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.eos_token)

    @property
    def unk_token_id(self):
        """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.unk_token)

    @property
    def sep_token_id(self):
        """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.sep_token)

    @property
    def pad_token_id(self):
        """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.pad_token)

    @property
    def cls_token_id(self):
        """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.cls_token)

    @property
    def mask_token_id(self):
        """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.mask_token)

    @property
    def additional_special_tokens_ids(self):
        """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.additional_special_tokens)

    def __init__(self, max_len=None, **kwargs):
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._additional_special_tokens = []

        self.max_len = max_len if max_len is not None else int(1e12)

        # Added tokens
        self.added_tokens_encoder = {}
        self.added_tokens_decoder = {}

        # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
        self.init_inputs = ()
        self.init_kwargs = {}

        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == 'additional_special_tokens':
                    assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
                else:
                    assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
                setattr(self, key, value)
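
    # Clarifying comment (not in the upstream file): the ``setattr`` above goes through the property
    # setters defined earlier, so passing e.g. ``unk_token='<unk>'`` as a keyword argument populates
    # ``self._unk_token`` and makes ``self.unk_token`` / ``self.unk_token_id`` usable. Keyword
    # arguments that are not special-token attributes are simply ignored by this base ``__init__``.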

    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        r"""
        Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.

        Args:
            pretrained_model_name_or_path: either:

                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.

            cache_dir: (`optional`) string:
                Path to a directory in which the downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force the (re-)download of the vocabulary files and override the cached versions if they exist.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.

            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.

            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.

        Examples::

            # We can't instantiate directly the base class `PreTrainedTokenizer`, so let's show our examples on a derived class: BertTokenizer

            # Download vocabulary from S3 and cache.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')

            # If the tokenizer uses a single vocabulary file, you can point directly to this file
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')

            # You can link tokens to special vocabulary when instantiating
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
            # You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
            assert tokenizer.unk_token == '<unk>'

        """
        return cls._from_pretrained(*inputs, **kwargs)

    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        proxies = kwargs.pop('proxies', None)

        s3_models = list(cls.max_model_input_sizes.keys())
        vocab_files = {}
        init_configuration = {}
        if pretrained_model_name_or_path in s3_models:
            # Get the vocabulary from AWS S3 bucket
            for file_id, map_list in cls.pretrained_vocab_files_map.items():
                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
            if cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration:
                init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path]
        else:
            # Get the vocabulary from local files
            logger.info(
                "Model name '{}' not found in model shortcut name list ({}). "
                "Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
                    pretrained_model_name_or_path, ', '.join(s3_models),
                    pretrained_model_name_or_path))

            # Look for the tokenizer main vocabulary files
            for file_id, file_name in cls.vocab_files_names.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    # If a directory is provided we look for the standard filenames
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                else:
                    # If a path to a file is provided we use it (will only work for non-BPE tokenizers using a single vocabulary file)
                    full_file_name = pretrained_model_name_or_path
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name

            # Look for the additional tokens files
            additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
                                      'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE,
                                      'tokenizer_config_file': TOKENIZER_CONFIG_FILE,
                                      }

            # If a path to a file was provided, get the parent directory
            saved_directory = pretrained_model_name_or_path
            if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
                saved_directory = os.path.dirname(saved_directory)

            for file_id, file_name in additional_files_names.items():
                full_file_name = os.path.join(saved_directory, file_name)
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name

            if all(full_file_name is None for full_file_name in vocab_files.values()):
                raise EnvironmentError(
                    "Model name '{}' was not found in tokenizers model name list ({}). "
                    "We assumed '{}' was a path or url to a directory containing vocabulary files "
                    "named {} but couldn't find such vocabulary files at this path or url.".format(
                        pretrained_model_name_or_path, ', '.join(s3_models),
                        pretrained_model_name_or_path,
                        list(cls.vocab_files_names.values())))

        # Get files from url, cache, or disk depending on the case
        try:
            resolved_vocab_files = {}
            for file_id, file_path in vocab_files.items():
                if file_path is None:
                    resolved_vocab_files[file_id] = None
                else:
                    resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies)
        except EnvironmentError:
            if pretrained_model_name_or_path in s3_models:
                msg = "Couldn't reach server at '{}' to download vocabulary files."
            else:
                msg = "Model name '{}' was not found in tokenizers model name list ({}). " \
                      "We assumed '{}' was a path or url to a directory containing vocabulary files " \
                      "named {}, but couldn't find such vocabulary files at this path or url.".format(
                          pretrained_model_name_or_path, ', '.join(s3_models),
                          pretrained_model_name_or_path,
                          list(cls.vocab_files_names.values()))
            raise EnvironmentError(msg)

        for file_id, file_path in vocab_files.items():
            if file_path == resolved_vocab_files[file_id]:
                logger.info("loading file {}".format(file_path))
            else:
                logger.info("loading file {} from cache at {}".format(
                    file_path, resolved_vocab_files[file_id]))

        # Prepare tokenizer initialization kwargs
        # Did we save some inputs and kwargs to reload?
        tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
        if tokenizer_config_file is not None:
            init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8"))
            saved_init_inputs = init_kwargs.pop('init_inputs', ())
            if not init_inputs:
                init_inputs = saved_init_inputs
        else:
            init_kwargs = init_configuration

        # Update with newly provided kwargs
        init_kwargs.update(kwargs)

        # Set max length if needed
        if pretrained_model_name_or_path in cls.max_model_input_sizes:
            # if we're using a pretrained model, ensure the tokenizer
            # won't index sequences longer than the number of positional embeddings
            max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
            if max_len is not None and isinstance(max_len, (int, float)):
                init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len)

        # Merge resolved_vocab_files arguments in init_kwargs.
        added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
        special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
        for args_name, file_path in resolved_vocab_files.items():
            if args_name not in init_kwargs:
                init_kwargs[args_name] = file_path
        if special_tokens_map_file is not None:
            special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8"))
            for key, value in special_tokens_map.items():
                if key not in init_kwargs:
                    init_kwargs[key] = value

        # Instantiate tokenizer.
        tokenizer = cls(*init_inputs, **init_kwargs)

        # Save inputs and kwargs for saving and re-loading with ``save_pretrained``
        tokenizer.init_inputs = init_inputs
        tokenizer.init_kwargs = init_kwargs

        # Add supplementary tokens.
        if added_tokens_file is not None:
            added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8"))
            added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
            tokenizer.added_tokens_encoder.update(added_tok_encoder)
            tokenizer.added_tokens_decoder.update(added_tok_decoder)

        return tokenizer
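
    # Summary comment (added for clarity, not in the upstream file) of the resolution order used by
    # ``_from_pretrained`` above:
    #   1. a known shortcut name -> vocabulary URLs from ``pretrained_vocab_files_map`` plus any
    #      ``pretrained_init_configuration`` entry;
    #   2. a local directory -> the standard filenames from ``vocab_files_names`` plus the optional
    #      ``added_tokens.json``, ``special_tokens_map.json`` and ``tokenizer_config.json`` files;
    #   3. a single local file -> used directly as the (single) vocabulary file.
    # Whatever is resolved ends up in ``init_kwargs`` and is passed to the class constructor.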

    def save_pretrained(self, save_directory):
        """ Save the tokenizer vocabulary files together with:
                - added tokens,
                - special-tokens-to-class-attributes mapping,
                - tokenizer instantiation positional and keyword inputs (e.g. do_lower_case for Bert).

            This won't save modifications (other than added tokens and the special token mapping) that you may have
            applied to the tokenizer after instantiation (e.g. modifying tokenizer.do_lower_case after creation).

            This method makes sure the full tokenizer can then be re-loaded using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
        """
        if not os.path.isdir(save_directory):
            logger.error("Saving directory ({}) should be a directory".format(save_directory))
            return

        special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
        added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
        tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)

        tokenizer_config = copy.deepcopy(self.init_kwargs)
        tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
        for file_id in self.vocab_files_names.keys():
            tokenizer_config.pop(file_id, None)

        with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(tokenizer_config, ensure_ascii=False))

        with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))

        with open(added_tokens_file, 'w', encoding='utf-8') as f:
            if self.added_tokens_encoder:
                out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
            else:
                out_str = u"{}"
            f.write(out_str)

        vocab_files = self.save_vocabulary(save_directory)

        return vocab_files + (special_tokens_map_file, added_tokens_file)
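
    # Clarifying comment (not in the upstream file): ``save_pretrained`` writes three bookkeeping
    # files (``tokenizer_config.json``, ``special_tokens_map.json``, ``added_tokens.json``) and then
    # delegates the actual vocabulary serialization to the subclass-specific ``save_vocabulary``
    # below, which is expected to return a tuple of file paths.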

    def save_vocabulary(self, save_directory):
        """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
            and special token mappings.

            Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
        """
        raise NotImplementedError

    def vocab_size(self):
        """ Size of the base vocabulary (without the added tokens) """
        raise NotImplementedError

    def __len__(self):
        """ Size of the full vocabulary with the added tokens """
        return self.vocab_size + len(self.added_tokens_encoder)

    def add_tokens(self, new_tokens):
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from the length of the current vocabulary.

        Args:
            new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of the Bert model and tokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expects the full size of the new vocabulary, i.e. the length of the tokenizer.
        """
        if not new_tokens:
            return 0

        to_add_tokens = []
        for token in new_tokens:
            assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode))
            if token != self.unk_token and \
                    self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and \
                    token not in to_add_tokens:
                to_add_tokens.append(token)
                logger.info("Adding %s to the vocabulary", token)

        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)

        return len(to_add_tokens)
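
    # Worked example (added for clarity, not in the upstream file), assuming a hypothetical tokenizer
    # whose current full vocabulary has 30000 entries (so ``len(tokenizer) == 30000``):
    #     tokenizer.add_tokens(['<ent>', '<rel>'])
    # stores {'<ent>': 30000, '<rel>': 30001} in ``added_tokens_encoder`` and returns 2; tokens that
    # already map to something other than ``unk_token_id`` are skipped and do not get a new id.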

    def num_added_tokens(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
            inside your training loop.

        Args:
            pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
                number of added tokens in the case of a single sequence if set to False.

        Returns:
            Number of tokens added to sequences
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))

    def add_special_tokens(self, special_tokens_dict):
        """
        Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
        to class attributes. If special tokens are NOT in the vocabulary, they are added
        to it (indexed starting from the last index of the current vocabulary).

        Using `add_special_tokens` will ensure your special tokens can be used in several ways:

        - special tokens are carefully handled by the tokenizer (they are never split)
        - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.

        When possible, special tokens are already registered for provided pretrained models (e.g. BertTokenizer's cls_token is already registered to be '[CLS]' and XLM's is registered to be '</s>').

        Args:
            special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
                [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
                ``additional_special_tokens``].

                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to add a new classification token to GPT-2
            tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            model = GPT2Model.from_pretrained('gpt2')

            special_tokens_dict = {'cls_token': '<CLS>'}

            num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expects the full size of the new vocabulary, i.e. the length of the tokenizer.

            assert tokenizer.cls_token == '<CLS>'
        """
        if not special_tokens_dict:
            return 0

        added_tokens = 0
        for key, value in special_tokens_dict.items():
            assert key in self.SPECIAL_TOKENS_ATTRIBUTES
            if key == 'additional_special_tokens':
                assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
                added_tokens += self.add_tokens(value)
            else:
                assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
                added_tokens += self.add_tokens([value])
            logger.info("Assigning %s to the %s key of the tokenizer", value, key)
            setattr(self, key, value)

        return added_tokens

    def tokenize(self, text, **kwargs):
        """ Converts a string into a sequence of tokens (strings), using the tokenizer.
            Splits into words for word-based vocabularies or into sub-words for sub-word-based
            vocabularies (BPE/SentencePiece/WordPiece).

            Takes care of added tokens.
        """
        def split_on_token(tok, text):
            result = []
            split_text = text.split(tok)
            for i, sub_text in enumerate(split_text):
                sub_text = sub_text.strip()
                if i == 0 and not sub_text:
                    result += [tok]
                elif i == len(split_text) - 1:
                    if sub_text:
                        result += [sub_text]
                    else:
                        pass
                else:
                    if sub_text:
                        result += [sub_text]
                    result += [tok]
            return result

        def split_on_tokens(tok_list, text):
            if not text:
                return []
            if not tok_list:
                return self._tokenize(text, **kwargs)

            tokenized_text = []
            text_list = [text]
            for tok in tok_list:
                tokenized_text = []
                for sub_text in text_list:
                    if sub_text not in self.added_tokens_encoder \
                            and sub_text not in self.all_special_tokens:
                        tokenized_text += split_on_token(tok, sub_text)
                    else:
                        tokenized_text += [sub_text]
                text_list = tokenized_text

            return sum((self._tokenize(token, **kwargs) if token not in self.added_tokens_encoder
                        and token not in self.all_special_tokens
                        else [token]
                        for token in tokenized_text), [])

        added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
        tokenized_text = split_on_tokens(added_tokens, text)
        return tokenized_text
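
    # Worked example (added for clarity, not in the upstream file): assuming '[NEW]' has been added
    # via ``add_tokens(['[NEW]'])``, calling ``tokenize("hello [NEW] world")`` first splits the text
    # on the added/special tokens into ['hello', '[NEW]', 'world'] and then runs the model-specific
    # ``_tokenize`` only on the pieces that are not added/special tokens, so '[NEW]' is never split.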

    def _tokenize(self, text, **kwargs):
        """ Converts a string into a sequence of tokens (strings), using the tokenizer.
            Splits into words for word-based vocabularies or into sub-words for sub-word-based
            vocabularies (BPE/SentencePiece/WordPiece).

            Does NOT take care of added tokens.
        """
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        """ Converts a single token, or a sequence of tokens, (str/unicode) into a single integer id
            (resp. a sequence of ids), using the vocabulary.
        """
        if tokens is None:
            return None

        if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
            return self._convert_token_to_id_with_added_voc(tokens)

        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        if len(ids) > self.max_len:
            logger.warning("Token indices sequence length is longer than the specified maximum sequence length "
                           "for this model ({} > {}). Running this sequence through the model will result in "
                           "indexing errors".format(len(ids), self.max_len))
        return ids

    def _convert_token_to_id_with_added_voc(self, token):
        if token is None:
            return None

        if token in self.added_tokens_encoder:
            return self.added_tokens_encoder[token]
        return self._convert_token_to_id(token)

    def _convert_token_to_id(self, token):
        raise NotImplementedError

    def encode(self,
               text,
               text_pair=None,
               add_special_tokens=False,
               max_length=None,
               stride=0,
               truncation_strategy='longest_first',
               return_tensors=None,
               **kwargs):
        """
        Converts a string into a sequence of ids (integers), using the tokenizer and vocabulary.

        Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.

        Args:
            text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method)
            text_pair: Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
                string using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method)
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            max_length: if set to a number, will limit the total sequence returned so that it has a maximum length.
                If there are overflowing tokens, those will be added to the returned dictionary.
            stride: if set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default): Iteratively reduce the input sequences until the input is under max_length,
                    removing a token at a time from the longest sequence (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length)
            return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
                or PyTorch torch.Tensor instead of a list of python integers.
            **kwargs: passed to the `self.tokenize()` method
        """
        encoded_inputs = self.encode_plus(text,
                                          text_pair=text_pair,
                                          max_length=max_length,
                                          add_special_tokens=add_special_tokens,
                                          stride=stride,
                                          truncation_strategy=truncation_strategy,
                                          return_tensors=return_tensors,
                                          **kwargs)

        return encoded_inputs["input_ids"]
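
    # Usage sketch (added for clarity, not in the upstream file), with a hypothetical derived
    # tokenizer instance ``tokenizer``:
    #     ids = tokenizer.encode("Hello world", add_special_tokens=True)
    # is equivalent to taking the "input_ids" entry of ``tokenizer.encode_plus(...)`` with the same
    # arguments; ``encode_plus`` additionally returns "token_type_ids" and, when relevant,
    # "overflowing_tokens", "num_truncated_tokens" and "special_tokens_mask".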

    def encode_plus(self,
                    text,
                    text_pair=None,
                    add_special_tokens=False,
                    max_length=None,
                    stride=0,
                    truncation_strategy='longest_first',
                    return_tensors=None,
                    **kwargs):
        """
        Returns a dictionary containing the encoded sequence or sequence pair and additional information:
        the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.

        Args:
            text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method)
            text_pair: Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
                string using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method)
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            max_length: if set to a number, will limit the total sequence returned so that it has a maximum length.
                If there are overflowing tokens, those will be added to the returned dictionary.
            stride: if set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default): Iteratively reduce the input sequences until the input is under max_length,
                    removing a token at a time from the longest sequence (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length)
            return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
                or PyTorch torch.Tensor instead of a list of python integers.
            **kwargs: passed to the `self.tokenize()` method
        """

        def get_input_ids(text):
            if isinstance(text, six.string_types):
                return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], six.string_types):
                return self.convert_tokens_to_ids(text)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
                return text
            else:
                raise ValueError("Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.")

        first_ids = get_input_ids(text)
        second_ids = get_input_ids(text_pair) if text_pair is not None else None

        return self.prepare_for_model(first_ids,
                                      pair_ids=second_ids,
                                      max_length=max_length,
                                      add_special_tokens=add_special_tokens,
                                      stride=stride,
                                      truncation_strategy=truncation_strategy,
                                      return_tensors=return_tensors)

    def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=False, stride=0,
                          truncation_strategy='longest_first', return_tensors=None):
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking the special tokens into account, and
        manages a window stride for overflowing tokens.

        Args:
            ids: list of tokenized input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            max_length: maximum length of the returned list. Will truncate by taking the special tokens into account.
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            stride: window stride for overflowing tokens. Can be useful for edge effect removal when using sequential
                lists of inputs.
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default): Iteratively reduce the input sequences until the input is under max_length,
                    removing a token at a time from the longest sequence (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length)
            return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
                or PyTorch torch.Tensor instead of a list of python integers.

        Return:
            A Dictionary of shape::

                {
                    input_ids: list[int],
                    overflowing_tokens: list[int] if a ``max_length`` is specified, else None
                    special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True``
                }

            With the fields:
                ``input_ids``: list of token ids to be fed to a model
                ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
                ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special
                    added tokens and 0 specifying sequence tokens.
        """
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0

        encoded_inputs = {}
        total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0)
        if max_length and total_len > max_length:
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids,
                                                                        num_tokens_to_remove=total_len - max_length,
                                                                        truncation_strategy=truncation_strategy,
                                                                        stride=stride)
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["num_truncated_tokens"] = total_len - max_length

        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])

        if return_tensors == 'tf' and is_tf_available():
            sequence = tf.constant([sequence])
            token_type_ids = tf.constant([token_type_ids])
        elif return_tensors == 'pt' and is_torch_available():
            sequence = torch.tensor([sequence])
            token_type_ids = torch.tensor([token_type_ids])
        elif return_tensors is not None:
            logger.warning("Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(return_tensors))

        encoded_inputs["input_ids"] = sequence
        encoded_inputs["token_type_ids"] = token_type_ids

        if max_length and len(encoded_inputs["input_ids"]) > max_length:
            encoded_inputs["input_ids"] = encoded_inputs["input_ids"][:max_length]
            encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"][:max_length]
            encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"][:max_length]

        return encoded_inputs

    def truncate_sequences(self, ids, pair_ids=None, num_tokens_to_remove=0, truncation_strategy='longest_first', stride=0):
        """Truncates a sequence pair in place to the maximum length.

            truncation_strategy: string selected in the following options:
                - 'longest_first' (default): Iteratively reduce the input sequences until the input is under max_length,
                    removing a token at a time from the longest sequence (when there is a pair of input sequences).
                    Overflowing tokens only contain overflow from the first sequence.
                - 'only_first': Only truncate the first sequence. Raise an error if the first sequence is shorter than or equal to num_tokens_to_remove.
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length)
        """
        if num_tokens_to_remove <= 0:
            return ids, pair_ids, []

        if truncation_strategy == 'longest_first':
            overflowing_tokens = []
            for _ in range(num_tokens_to_remove):
                if pair_ids is None or len(ids) > len(pair_ids):
                    overflowing_tokens = [ids[-1]] + overflowing_tokens
                    ids = ids[:-1]
                else:
                    pair_ids = pair_ids[:-1]
            window_len = min(len(ids), stride)
            if window_len > 0:
                overflowing_tokens = ids[-window_len:] + overflowing_tokens
        elif truncation_strategy == 'only_first':
            assert len(ids) > num_tokens_to_remove
            window_len = min(len(ids), stride + num_tokens_to_remove)
            overflowing_tokens = ids[-window_len:]
            ids = ids[:-num_tokens_to_remove]
        elif truncation_strategy == 'only_second':
            assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
            window_len = min(len(pair_ids), stride + num_tokens_to_remove)
            overflowing_tokens = pair_ids[-window_len:]
            pair_ids = pair_ids[:-num_tokens_to_remove]
        elif truncation_strategy == 'do_not_truncate':
            raise ValueError("Input sequence is too long for max_length. Please select a truncation strategy.")
        else:
            raise ValueError("Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']")
        return (ids, pair_ids, overflowing_tokens)
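
    # Worked example (added for clarity, not in the upstream file) for the 'longest_first' strategy:
    # with ids=[1, 2, 3, 4, 5], pair_ids=[6, 7], num_tokens_to_remove=2 and stride=0, tokens are
    # removed one at a time from whichever sequence is currently longer, giving ids=[1, 2, 3],
    # pair_ids=[6, 7] and overflowing_tokens=[4, 5]; a non-zero ``stride`` additionally prepends the
    # last ``stride`` kept tokens of ``ids`` to ``overflowing_tokens`` so that windows overlap.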

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        logger.warning("This tokenizer does not make use of special tokens.")
        if token_ids_1 is None:
            return len(token_ids_0) * [0]
        return [0] * len(token_ids_0) + [1] * len(token_ids_1)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens. For instance, a RoBERTa sequence has the following format:

            single sequence: <s> X </s>
            pair of sequences: <s> A </s></s> B </s>
        """
        logger.warning("This tokenizer does not make use of special tokens. Input is returned with no modification.")
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formatted with
                special tokens for the model

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """ Converts a single index or a sequence of indices (integers) into a token
            (resp. a sequence of tokens) (str/unicode), using the vocabulary and added tokens.

            Args:
                skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
        """
        if isinstance(ids, int):
            if ids in self.added_tokens_decoder:
                return self.added_tokens_decoder[ids]
            else:
                return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            if skip_special_tokens and index in self.all_special_ids:
                continue
            if index in self.added_tokens_decoder:
                tokens.append(self.added_tokens_decoder[index])
            else:
                tokens.append(self._convert_id_to_token(index))
        return tokens

    def _convert_id_to_token(self, index):
        raise NotImplementedError

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (strings) into a single string.
            The simplest way to do it is ``' '.join(self.convert_ids_to_tokens(token_ids))``,
            but we often want to remove sub-word tokenization artifacts at the same time.
        """
        return ' '.join(self.convert_ids_to_tokens(tokens))

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
        """
        Converts a sequence of ids (integers) into a string, using the tokenizer and vocabulary,
        with options to remove special tokens and clean up tokenization spaces.
        Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.

        Args:
            token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
            skip_special_tokens: if set to True, will remove special tokens from the output.
            clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
        """
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE,
        # we need to build the string separately for added tokens and byte-level tokens,
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(" " + token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        text = ''.join(sub_texts)

        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
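
    # Clarifying comment (not in the upstream file): added tokens are stitched into the decoded text
    # with a plain leading space, while runs of regular vocabulary tokens go through
    # ``convert_tokens_to_string`` so that sub-word/byte-level artifacts can be merged properly;
    # see https://github.com/huggingface/transformers/issues/1133 for the motivating issue.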

    @property
    def special_tokens_map(self):
        """ A dictionary mapping special token class attributes (cls_token, unk_token...) to their
            values ('<unk>', '<cls>'...)
        """
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, "_" + attr)
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self):
        """ List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
            (cls_token, unk_token...).
        """
        all_toks = []
        set_attr = self.special_tokens_map
        for attr_value in set_attr.values():
            all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
        all_toks = list(set(all_toks))
        return all_toks

    @property
    def all_special_ids(self):
        """ List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
            class attributes (cls_token, unk_token...).
        """
        all_toks = self.all_special_tokens
        all_ids = list(self._convert_token_to_id(t) for t in all_toks)
        return all_ids

    @staticmethod
    def clean_up_tokenization(out_string):
        """ Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
        """
        out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
                        ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
                        ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
        return out_string
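
    # Worked example (added for clarity, not in the upstream file):
    #     PreTrainedTokenizer.clean_up_tokenization("i do not like it .")
    # returns "i don't like it.": the trailing " ." is glued to the previous word and " do not" is
    # contracted to " don't" by the chained ``replace`` calls above.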