
/gensim/models/word2vec.py

https://github.com/lazycrazyowl/gensim
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.

The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.

For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews,
visit http://radimrehurek.com/2014/02/word2vec-tutorial/

**Install Cython with `pip install cython` to use optimized word2vec training** (70x speedup [3]_).

Initialize a model with e.g.::

>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)

Persist a model to disk with::

>>> model.save(fname)
>>> model = Word2Vec.load(fname)  # you can continue training with the loaded model!

The model can also be instantiated from an existing file on disk in the word2vec C format::

>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False)  # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True)  # C binary format

You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::

>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]

>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'

>>> model.similarity('woman', 'man')
0.73723527

>>> model['computer']  # raw numpy vector of a word
array([-0.00449447, -0.00310097,  0.02421786, ...], dtype=float32)

and so on.

If you're finished training a model (=no more updates, only querying), you can do

>>> model.init_sims(replace=True)

to trim unneeded model memory = use (much) less RAM.

.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
       In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import sys
import os
import heapq
import time
from copy import deepcopy
import threading
try:
    from queue import Queue
except ImportError:
    from Queue import Queue

from numpy import exp, dot, zeros, outer, random, dtype, get_include, float32 as REAL,\
    uint32, seterr, array, uint8, vstack, argsort, fromstring, sqrt, newaxis, ndarray, empty, sum as np_sum

logger = logging.getLogger("gensim.models.word2vec")

from gensim import utils, matutils  # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
try:
    from gensim_addons.models.word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
except ImportError:
    try:
        # try to compile and use the faster cython version
        import pyximport
        models_dir = os.path.dirname(__file__) or os.getcwd()
        pyximport.install(setup_args={"include_dirs": [models_dir, get_include()]})
        from word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
    except:
        # failed... fall back to plain numpy (20-80x slower training than the above)
        FAST_VERSION = -1
        def train_sentence_sg(model, sentence, alpha, work=None):
            """
            Update skip-gram model by training on a single sentence.

            The sentence is a list of Vocab objects (or None, where the corresponding
            word is not in the vocabulary). Called internally from `Word2Vec.train()`.

            This is the non-optimized, Python version. If you have cython installed, gensim
            will use the optimized version from word2vec_inner instead.

            """
            if model.negative:
                # precompute negative labels
                labels = zeros(model.negative + 1)
                labels[0] = 1.0

            for pos, word in enumerate(sentence):
                if word is None:
                    continue  # OOV word in the input sentence => skip
                reduced_window = random.randint(model.window)  # `b` in the original word2vec code

                # now go over all words from the (reduced) window, predicting each one in turn
                start = max(0, pos - model.window + reduced_window)
                for pos2, word2 in enumerate(sentence[start : pos + model.window + 1 - reduced_window], start):
                    # don't train on OOV words and on the `word` itself
                    if word2 and not (pos2 == pos):
                        l1 = model.syn0[word2.index]
                        neu1e = zeros(l1.shape)

                        if model.hs:
                            # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
                            l2a = deepcopy(model.syn1[word.point])  # 2d matrix, codelen x layer1_size
                            fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T)))  # propagate hidden -> output
                            ga = (1 - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
                            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
                            neu1e += dot(ga, l2a)  # save error

                        if model.negative:
                            # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
                            word_indices = [word.index]
                            while len(word_indices) < model.negative + 1:
                                w = model.table[random.randint(model.table.shape[0])]
                                if w != word.index:
                                    word_indices.append(w)
                            l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
                            fb = 1. / (1. + exp(-dot(l1, l2b.T)))  # propagate hidden -> output
                            gb = (labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
                            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
                            neu1e += dot(gb, l2b)  # save error

                        model.syn0[word2.index] += neu1e  # learn input -> hidden

            return len([word for word in sentence if word is not None])
        def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
            """
            Update CBOW model by training on a single sentence.

            The sentence is a list of Vocab objects (or None, where the corresponding
            word is not in the vocabulary). Called internally from `Word2Vec.train()`.

            This is the non-optimized, Python version. If you have cython installed, gensim
            will use the optimized version from word2vec_inner instead.

            """
            if model.negative:
                # precompute negative labels
                labels = zeros(model.negative + 1)
                labels[0] = 1.

            for pos, word in enumerate(sentence):
                if word is None:
                    continue  # OOV word in the input sentence => skip
                reduced_window = random.randint(model.window)  # `b` in the original word2vec code
                start = max(0, pos - model.window + reduced_window)
                window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)
                word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
                l1 = np_sum(model.syn0[word2_indices], axis=0)  # 1 x layer1_size
                if word2_indices and model.cbow_mean:
                    l1 /= len(word2_indices)
                neu1e = zeros(l1.shape)

                if model.hs:
                    l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
                    fa = 1. / (1. + exp(-dot(l1, l2a.T)))  # propagate hidden -> output
                    ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
                    model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
                    neu1e += dot(ga, l2a)  # save error

                if model.negative:
                    # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
                    word_indices = [word.index]
                    while len(word_indices) < model.negative + 1:
                        w = model.table[random.randint(model.table.shape[0])]
                        if w != word.index:
                            word_indices.append(w)
                    l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
                    fb = 1. / (1. + exp(-dot(l1, l2b.T)))  # propagate hidden -> output
                    gb = (labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
                    model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
                    neu1e += dot(gb, l2b)  # save error

                model.syn0[word2_indices] += neu1e  # learn input -> hidden, here for all words in the window separately

            return len([word for word in sentence if word is not None])
class Vocab(object):
    """A single vocabulary item, used internally for constructing binary trees (incl. both word leaves and inner nodes)."""
    def __init__(self, **kwargs):
        self.count = 0
        self.__dict__.update(kwargs)

    def __lt__(self, other):  # used for sorting in a priority queue
        return self.count < other.count

    def __str__(self):
        vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
        return "<" + ', '.join(vals) + ">"
class Word2Vec(utils.SaveLoad):
    """
    Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/

    The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
    compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.

    """
    def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
                 sample=0, seed=1, workers=1, min_alpha=0.0001, sg=1, hs=1, negative=0, cbow_mean=0):
        """
        Initialize the model from an iterable of `sentences`. Each sentence is a
        list of words (unicode strings) that will be used for training.

        The `sentences` iterable can be simply a list, but for larger corpora,
        consider an iterable that streams the sentences directly from disk/network.
        See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
        this module for such examples.

        If you don't supply `sentences`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.

        `sg` defines the training algorithm. By default (`sg=1`), skip-gram is used. Otherwise, CBOW is employed.

        `size` is the dimensionality of the feature vectors.

        `window` is the maximum distance between the current and predicted word within a sentence.

        `alpha` is the initial learning rate (will linearly drop to zero as training progresses).

        `seed` = for the random number generator.

        `min_count` = ignore all words with total frequency lower than this.

        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
            default is 0 (off), useful value is 1e-5.

        `workers` = use this many worker threads to train the model (=faster training with multicore machines).

        `hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0).

        `negative` = if > 0, negative sampling will be used; the int for `negative`
            specifies how many "noise words" should be drawn (usually between 5-20).

        `cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
            Only applies when CBOW is used.

        """
        self.vocab = {}  # mapping from a word (string) to a Vocab object
        self.index2word = []  # map from a word's matrix index (int) to word (string)
        self.sg = int(sg)
        self.table = None  # for negative sampling --> this needs a lot of RAM! consider setting back to None before saving
        self.layer1_size = int(size)
        if size % 4 != 0:
            logger.warning("consider setting layer size to a multiple of 4 for greater performance")
        self.alpha = float(alpha)
        self.window = int(window)
        self.seed = seed
        self.min_count = min_count
        self.sample = sample
        self.workers = workers
        self.min_alpha = min_alpha
        self.hs = hs
        self.negative = negative
        self.cbow_mean = int(cbow_mean)
        if sentences is not None:
            self.build_vocab(sentences)
            self.train(sentences)
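
    # Illustrative configurations (not executed here): skip-gram with hierarchical softmax
    # is the default; a CBOW model with negative sampling instead would be requested roughly
    # like this (the parameter values are only examples, not recommendations):
    #
    #   >>> model = Word2Vec(sentences, size=100, window=5, min_count=5,
    #   ...                  sg=0, hs=0, negative=10, cbow_mean=1, workers=4)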
    def make_table(self, table_size=100000000, power=0.75):
        """
        Create a table using stored vocabulary word counts for drawing random words in the negative
        sampling training routines.

        Called internally from `build_vocab()`.

        """
        logger.info("constructing a table with noise distribution from %i words" % len(self.vocab))
        # table (= list of words) of noise distribution for negative sampling
        vocab_size = len(self.index2word)
        self.table = zeros(table_size, dtype=uint32)

        if not vocab_size:
            logger.warning("empty vocabulary in word2vec, is this intended?")
            return

        # compute sum of all power (Z in paper)
        train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
        # go through the whole table and fill it up with the word indexes proportional to a word's count**power
        widx = 0
        # normalize count^0.75 by Z
        d1 = self.vocab[self.index2word[widx]].count**power / train_words_pow
        for tidx in xrange(table_size):
            self.table[tidx] = widx
            if 1.0 * tidx / table_size > d1:
                widx += 1
                d1 += self.vocab[self.index2word[widx]].count**power / train_words_pow
            if widx >= vocab_size:
                widx = vocab_size - 1
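
    # Worked example of the noise distribution (illustrative numbers): for raw counts 5, 2 and 1,
    # the unnormalized weights are 5**0.75 ~= 3.34, 2**0.75 ~= 1.68 and 1**0.75 = 1.00,
    # so with Z ~= 6.03 the table is filled roughly 55% / 28% / 17% with the three word indexes --
    # frequent words are drawn more often, but less than proportionally to their raw count.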
    def create_binary_tree(self):
        """
        Create a binary Huffman tree using stored vocabulary word counts. Frequent words
        will have shorter binary codes. Called internally from `build_vocab()`.

        """
        logger.info("constructing a huffman tree from %i words" % len(self.vocab))

        # build the huffman tree
        heap = list(itervalues(self.vocab))
        heapq.heapify(heap)
        for i in xrange(len(self.vocab) - 1):
            min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
            heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))

        # recurse over the tree, assigning a binary code to each vocabulary word
        if heap:
            max_depth, stack = 0, [(heap[0], [], [])]
            while stack:
                node, codes, points = stack.pop()
                if node.index < len(self.vocab):
                    # leaf node => store its path from the root
                    node.code, node.point = codes, points
                    max_depth = max(len(codes), max_depth)
                else:
                    # inner node => continue recursion
                    points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
                    stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                    stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))

            logger.info("built huffman tree with maximum node depth %i" % max_depth)
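
    # Illustrative property of the Huffman coding (small hand-worked case): for word counts
    # 4, 2, 1 and 1 the resulting code lengths are 1, 2, 3 and 3 bits respectively, so the most
    # frequent word gets the shortest path (code) from the root and is cheapest to update.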
    def precalc_sampling(self):
        """Precalculate each vocabulary item's threshold for sampling"""
        if self.sample:
            logger.info("frequent-word downsampling, threshold %g; progress tallies will be approximate" % (self.sample))
            total_words = sum(v.count for v in itervalues(self.vocab))
            threshold_count = float(self.sample) * total_words
        for v in itervalues(self.vocab):
            prob = (sqrt(v.count / threshold_count) + 1) * (threshold_count / v.count) if self.sample else 1.0
            v.sample_probability = min(prob, 1.0)
    def build_vocab(self, sentences):
        """
        Build vocabulary from a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.

        """
        logger.info("collecting all words and their counts")
        sentence_no, vocab = -1, {}
        total_words = 0
        for sentence_no, sentence in enumerate(sentences):
            if sentence_no % 10000 == 0:
                logger.info("PROGRESS: at sentence #%i, processed %i words and %i word types" %
                    (sentence_no, total_words, len(vocab)))
            for word in sentence:
                total_words += 1
                if word in vocab:
                    vocab[word].count += 1
                else:
                    vocab[word] = Vocab(count=1)
        logger.info("collected %i word types from a corpus of %i words and %i sentences" %
            (len(vocab), total_words, sentence_no + 1))

        # assign a unique index to each word
        self.vocab, self.index2word = {}, []
        for word, v in iteritems(vocab):
            if v.count >= self.min_count:
                v.index = len(self.vocab)
                self.index2word.append(word)
                self.vocab[word] = v
        logger.info("total %i word types after removing those with count<%s" % (len(self.vocab), self.min_count))

        if self.hs:
            # add info about each word's Huffman encoding
            self.create_binary_tree()
        if self.negative:
            # build the table for drawing random words (for negative sampling)
            self.make_table()
        # precalculate downsampling thresholds
        self.precalc_sampling()
        self.reset_weights()
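
    # Illustrative two-step usage (equivalent to passing `sentences` to the constructor):
    #
    #   >>> model = Word2Vec(size=100, min_count=5)  # empty model, no training yet
    #   >>> model.build_vocab(sentences)             # first pass: collect the vocabulary
    #   >>> model.train(sentences)                   # second pass: train the weights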
    def train(self, sentences, total_words=None, word_count=0, chunksize=100):
        """
        Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.

        """
        if FAST_VERSION < 0:
            import warnings
            warnings.warn("Cython compilation failed, training will be slow. Do you have Cython installed? `pip install cython`")
        logger.info("training model with %i workers on %i vocabulary and %i features, "
            "using 'skipgram'=%s 'hierarchical softmax'=%s 'subsample'=%s and 'negative sampling'=%s" %
            (self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative))

        if not self.vocab:
            raise RuntimeError("you must first build vocabulary before training the model")

        start, next_report = time.time(), [1.0]
        word_count = [word_count]
        total_words = total_words or int(sum(v.count * v.sample_probability for v in itervalues(self.vocab)))
        jobs = Queue(maxsize=2 * self.workers)  # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
        lock = threading.Lock()  # for shared state (=number of words trained so far, log reports...)

        def worker_train():
            """Train the model, lifting lists of sentences from the jobs queue."""
            work = zeros(self.layer1_size, dtype=REAL)  # each thread must have its own work memory
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)

            while True:
                job = jobs.get()
                if job is None:  # data finished, exit
                    break
                # update the learning rate before every job
                alpha = max(self.min_alpha, self.alpha * (1 - 1.0 * word_count[0] / total_words))
                # how many words did we train on? out-of-vocabulary (unknown) words do not count
                if self.sg:
                    job_words = sum(train_sentence_sg(self, sentence, alpha, work) for sentence in job)
                else:
                    job_words = sum(train_sentence_cbow(self, sentence, alpha, work, neu1) for sentence in job)
                with lock:
                    word_count[0] += job_words
                    elapsed = time.time() - start
                    if elapsed >= next_report[0]:
                        logger.info("PROGRESS: at %.2f%% words, alpha %.05f, %.0f words/s" %
                            (100.0 * word_count[0] / total_words, alpha, word_count[0] / elapsed if elapsed else 0.0))
                        next_report[0] = elapsed + 1.0  # don't flood the log, wait at least a second between progress reports

        workers = [threading.Thread(target=worker_train) for _ in xrange(self.workers)]
        for thread in workers:
            thread.daemon = True  # make interrupting the process with ctrl+c easier
            thread.start()

        def prepare_sentences():
            for sentence in sentences:
                # avoid calling random_sample() where prob >= 1, to speed things up a little:
                sampled = [self.vocab[word] for word in sentence
                    if word in self.vocab and (self.vocab[word].sample_probability >= 1.0 or self.vocab[word].sample_probability >= random.random_sample())]
                yield sampled

        # convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue
        for job_no, job in enumerate(utils.grouper(prepare_sentences(), chunksize)):
            logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
            jobs.put(job)
        logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())
        for _ in xrange(self.workers):
            jobs.put(None)  # give the workers heads up that they can finish -- no more work!

        for thread in workers:
            thread.join()

        elapsed = time.time() - start
        logger.info("training on %i words took %.1fs, %.0f words/s" %
            (word_count[0], elapsed, word_count[0] / elapsed if elapsed else 0.0))

        return word_count[0]
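
    # The learning rate decays linearly with training progress. Worked example (illustrative,
    # for the default alpha=0.025 and min_alpha=0.0001): at 0% of the words alpha is 0.025,
    # at 50% it is 0.0125, and at 100% it is clipped to min_alpha, i.e. 0.0001.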
    def reset_weights(self):
        """Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
        logger.info("resetting layer weights")
        random.seed(self.seed)
        self.syn0 = empty((len(self.vocab), self.layer1_size), dtype=REAL)
        # randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
        for i in xrange(len(self.vocab)):
            self.syn0[i] = (random.rand(self.layer1_size) - 0.5) / self.layer1_size
        if self.hs:
            self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
        if self.negative:
            self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
        self.syn0norm = None
    def save_word2vec_format(self, fname, fvocab=None, binary=False):
        """
        Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.

        """
        if fvocab is not None:
            logger.info("Storing vocabulary in %s" % (fvocab))
            with utils.smart_open(fvocab, 'wb') as vout:
                for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
                    vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
        logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.layer1_size, fname))
        assert (len(self.vocab), self.layer1_size) == self.syn0.shape
        with utils.smart_open(fname, 'wb') as fout:
            fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
            # store in sorted order: most frequent words at the top
            for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
                row = self.syn0[vocab.index]
                if binary:
                    fout.write(utils.to_utf8(word) + b" " + row.tostring())
                else:
                    fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
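
    # Illustrative round-trip through the C format (the file names are just examples):
    #
    #   >>> model.save_word2vec_format('/tmp/vectors.bin', binary=True)
    #   >>> model2 = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True)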
    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True):
        """
        Load the input-hidden weight matrix from the original C word2vec-tool format.

        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.

        `binary` is a boolean indicating whether the data is in binary word2vec format.
        `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
        Word counts are read from `fvocab` filename, if set (this is the file generated
        by `-save-vocab` flag of the original C tool).

        """
        counts = None
        if fvocab is not None:
            logger.info("loading word counts from %s" % (fvocab))
            counts = {}
            with utils.smart_open(fvocab) as fin:
                for line in fin:
                    word, count = utils.to_unicode(line).strip().split()
                    counts[word] = int(count)

        logger.info("loading projection weights from %s" % (fname))
        with utils.smart_open(fname) as fin:
            header = utils.to_unicode(fin.readline())
            vocab_size, layer1_size = map(int, header.split())  # throws for invalid file format
            result = Word2Vec(size=layer1_size)
            result.syn0 = zeros((vocab_size, layer1_size), dtype=REAL)
            if binary:
                binary_len = dtype(REAL).itemsize * layer1_size
                for line_no in xrange(vocab_size):
                    # mixed text and binary: read text first, then binary
                    word = []
                    while True:
                        ch = fin.read(1)
                        if ch == b' ':
                            break
                        if ch != b'\n':  # ignore newlines in front of words (some binary files have newline, some don't)
                            word.append(ch)
                    word = utils.to_unicode(b''.join(word))
                    if counts is None:
                        result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
                    elif word in counts:
                        result.vocab[word] = Vocab(index=line_no, count=counts[word])
                    else:
                        logger.warning("vocabulary file is incomplete")
                        result.vocab[word] = Vocab(index=line_no, count=None)
                    result.index2word.append(word)
                    result.syn0[line_no] = fromstring(fin.read(binary_len), dtype=REAL)
            else:
                for line_no, line in enumerate(fin):
                    parts = line.split()
                    if len(parts) != layer1_size + 1:
                        raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
                    word, weights = parts[0], map(REAL, parts[1:])
                    if counts is None:
                        result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
                    elif word in counts:
                        result.vocab[word] = Vocab(index=line_no, count=counts[word])
                    else:
                        logger.warning("vocabulary file is incomplete")
                        result.vocab[word] = Vocab(index=line_no, count=None)
                    result.index2word.append(word)
                    result.syn0[line_no] = weights
        logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
        result.init_sims(norm_only)
        return result
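
    # Illustrative use of `fvocab` (file names are just examples): word counts written by the
    # C tool's -save-vocab flag can be loaded alongside the vectors, so that frequency-based
    # features like `accuracy(restrict_vocab=...)` use real counts instead of rank guesses:
    #
    #   >>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', fvocab='/tmp/vocab.txt', binary=True)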
    def most_similar(self, positive=[], negative=[], topn=10):
        """
        Find the top-N most similar words. Positive words contribute positively towards the
        similarity, negative words negatively.

        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given words, and corresponds to the `word-analogy` and
        `distance` scripts in the original word2vec implementation.

        Example::

          >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
          [('queen', 0.50882536), ...]

        """
        self.init_sims()

        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]

        # add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
        positive = [(word, 1.0) if isinstance(word, string_types + (ndarray,))
                    else word for word in positive]
        negative = [(word, -1.0) if isinstance(word, string_types + (ndarray,))
                    else word for word in negative]

        # compute the weighted average of all words
        all_words, mean = set(), []
        for word, weight in positive + negative:
            if isinstance(word, ndarray):
                mean.append(weight * word)
            elif word in self.vocab:
                mean.append(weight * self.syn0norm[self.vocab[word].index])
                all_words.add(self.vocab[word].index)
            else:
                raise KeyError("word '%s' not in vocabulary" % word)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)

        dists = dot(self.syn0norm, mean)
        if not topn:
            return dists
        best = argsort(dists)[::-1][:topn + len(all_words)]
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]
    def doesnt_match(self, words):
        """
        Which word from the given list doesn't go with the others?

        Example::

          >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
          'cereal'

        """
        self.init_sims()

        words = [word for word in words if word in self.vocab]  # filter out OOV words
        logger.debug("using words %s" % words)
        if not words:
            raise ValueError("cannot select a word from an empty list")
        vectors = vstack(self.syn0norm[self.vocab[word].index] for word in words).astype(REAL)
        mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
        dists = dot(vectors, mean)
        return sorted(zip(dists, words))[0][1]
    def __getitem__(self, word):
        """
        Return a word's representation in vector space, as a 1D numpy array.

        Example::

          >>> trained_model['woman']
          array([ -1.40128313e-02, ...])

        """
        return self.syn0[self.vocab[word].index]

    def __contains__(self, word):
        return word in self.vocab

    def similarity(self, w1, w2):
        """
        Compute cosine similarity between two words.

        Example::

          >>> trained_model.similarity('woman', 'man')
          0.73723527

          >>> trained_model.similarity('woman', 'woman')
          1.0

        """
        return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
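
    # Equivalent formulation (illustrative): cosine similarity is simply the dot product of
    # the two length-normalized vectors, so the call above is the same as
    #
    #   >>> dot(matutils.unitvec(model['woman']), matutils.unitvec(model['man']))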
    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.

        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!

        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.

        """
        if getattr(self, 'syn0norm', None) is None or replace:
            logger.info("precomputing L2-norms of word weight vectors")
            if replace:
                for i in xrange(self.syn0.shape[0]):
                    self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
                self.syn0norm = self.syn0
                if hasattr(self, 'syn1'):
                    del self.syn1
            else:
                self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
    def accuracy(self, questions, restrict_vocab=30000):
        """
        Compute accuracy of the model. `questions` is a filename where lines are
        4-tuples of words, split into sections by ": SECTION NAME" lines.
        See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example.

        The accuracy is reported (=printed to log and returned as a list) for each
        section separately, plus there's one aggregate summary at the end.

        Use `restrict_vocab` to ignore all questions containing a word whose frequency
        is not in the top-N most frequent words (default top 30,000).

        This method corresponds to the `compute-accuracy` script of the original C word2vec.

        """
        ok_vocab = dict(sorted(iteritems(self.vocab),
                               key=lambda item: -item[1].count)[:restrict_vocab])
        ok_index = set(v.index for v in itervalues(ok_vocab))

        def log_accuracy(section):
            correct, incorrect = section['correct'], section['incorrect']
            if correct + incorrect > 0:
                logger.info("%s: %.1f%% (%i/%i)" %
                    (section['section'], 100.0 * correct / (correct + incorrect),
                     correct, correct + incorrect))

        sections, section = [], None
        for line_no, line in enumerate(utils.smart_open(questions)):
            # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
            line = utils.to_unicode(line)
            if line.startswith(': '):
                # a new section starts => store the old section
                if section:
                    sections.append(section)
                    log_accuracy(section)
                section = {'section': line.lstrip(': ').strip(), 'correct': 0, 'incorrect': 0}
            else:
                if not section:
                    raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
                try:
                    a, b, c, expected = [word.lower() for word in line.split()]  # TODO assumes vocabulary preprocessing uses lowercase, too...
                except:
                    logger.info("skipping invalid line #%i in %s" % (line_no, questions))
                    continue  # skip malformed question lines instead of reusing stale words
                if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                    logger.debug("skipping line #%i with OOV words: %s" % (line_no, line))
                    continue

                ignore = set(self.vocab[v].index for v in [a, b, c])  # indexes of words to ignore
                predicted = None
                # find the most likely prediction, ignoring OOV words and input words
                for index in argsort(self.most_similar(positive=[b, c], negative=[a], topn=False))[::-1]:
                    if index in ok_index and index not in ignore:
                        predicted = self.index2word[index]
                        if predicted != expected:
                            logger.debug("%s: expected %s, predicted %s" % (line.strip(), expected, predicted))
                        break
                section['correct' if predicted == expected else 'incorrect'] += 1
        if section:
            # store the last section, too
            sections.append(section)
            log_accuracy(section)

        total = {'section': 'total', 'correct': sum(s['correct'] for s in sections), 'incorrect': sum(s['incorrect'] for s in sections)}
        log_accuracy(total)
        sections.append(total)
        return sections
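
    # The questions file uses the analogy format of the original word2vec evaluation set
    # (illustrative excerpt; the file name below is just an example):
    #
    #   : capital-common-countries
    #   Athens Greece Baghdad Iraq
    #
    #   >>> sections = model.accuracy('questions-words.txt')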
    def __str__(self):
        return "Word2Vec(vocab=%s, size=%s, alpha=%s)" % (len(self.index2word), self.layer1_size, self.alpha)

    def save(self, *args, **kwargs):
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])  # don't bother storing the cached normalized vectors
        super(Word2Vec, self).save(*args, **kwargs)
class BrownCorpus(object):
    """Iterate over sentences from the Brown corpus (part of NLTK data)."""
    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            fname = os.path.join(self.dirname, fname)
            if not os.path.isfile(fname):
                continue
            for line in utils.smart_open(fname):
                line = utils.to_unicode(line)
                # each file line is a single sentence in the Brown corpus
                # each token is WORD/POS_TAG
                token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
                # ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
                words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
                if not words:  # don't bother sending out empty sentences
                    continue
                yield words
class Text8Corpus(object):
    """Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
    def __init__(self, fname):
        self.fname = fname

    def __iter__(self):
        # the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
        sentence, rest, max_sentence_length = [], b'', 1000
        with utils.smart_open(self.fname) as fin:
            while True:
                text = rest + fin.read(8192)  # avoid loading the entire file (=1 line) into RAM
                if text == rest:  # EOF
                    sentence.extend(rest.split())  # return the last chunk of words, too (may be shorter/longer)
                    if sentence:
                        yield sentence
                    break
                last_token = text.rfind(b' ')  # the last token may have been split in two... keep it for the next iteration
                words, rest = (utils.to_unicode(text[:last_token]).split(), text[last_token:].strip()) if last_token >= 0 else ([], text)
                sentence.extend(words)
                while len(sentence) >= max_sentence_length:
                    yield sentence[:max_sentence_length]
                    sentence = sentence[max_sentence_length:]
class LineSentence(object):
    """Simple format: one sentence = one line; words already preprocessed and separated by whitespace."""
    def __init__(self, source):
        """
        `source` can be either a string or a file object.

        Example::

          sentences = LineSentence('myfile.txt')

        Or for compressed files::

          sentences = LineSentence('compressed_text.txt.bz2')
          sentences = LineSentence('compressed_text.txt.gz')

        """
        self.source = source

    def __iter__(self):
        """Iterate through the lines in the source."""
        try:
            # Assume it is a file-like object and try treating it as such
            # Things that don't have seek will trigger an exception
            self.source.seek(0)
            for line in self.source:
                yield utils.to_unicode(line).split()
        except AttributeError:
            # If it didn't work like a file, use it as a string filename
            with utils.smart_open(self.source) as fin:
                for line in fin:
                    yield utils.to_unicode(line).split()
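
# Illustrative end-to-end use of LineSentence (the path is only an example):
#
#   >>> sentences = LineSentence('/tmp/corpus.txt')  # one preprocessed sentence per line
#   >>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)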
# Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s" % " ".join(sys.argv))
    logging.info("using optimization %s" % FAST_VERSION)

    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    infile = sys.argv[1]
    from gensim.models.word2vec import Word2Vec  # avoid referencing __main__ in pickle

    seterr(all='raise')  # don't ignore numpy errors

    # model = Word2Vec(LineSentence(infile), size=200, min_count=5, workers=4)
    model = Word2Vec(Text8Corpus(infile), size=200, min_count=5, workers=1)

    if len(sys.argv) > 3:
        outfile = sys.argv[3]
        model.save(outfile + '.model')
        model.save_word2vec_format(outfile + '.model.bin', binary=True)
        model.save_word2vec_format(outfile + '.model.txt', binary=False)

    if len(sys.argv) > 2:
        questions_file = sys.argv[2]
        model.accuracy(sys.argv[2])

    logging.info("finished running %s" % program)