PageRenderTime 44ms CodeModel.GetById 10ms RepoModel.GetById 1ms app.codeStats 0ms

/Bio/GenBank/__init__.py

https://gitlab.com/smilefreak/biopython
Python | 1266 lines | 1246 code | 2 blank | 18 comment | 2 complexity | c6acc94325e942de7a49f302a16e107c MD5 | raw file
  1. # Copyright 2000 by Jeffrey Chang, Brad Chapman. All rights reserved.
  2. # Copyright 2006-2013 by Peter Cock. All rights reserved.
  3. #
  4. # This code is part of the Biopython distribution and governed by its
  5. # license. Please see the LICENSE file that should have been included
  6. # as part of this package.
  7. """Code to work with GenBank formatted files.
  8. Rather than using Bio.GenBank, you are now encouraged to use Bio.SeqIO with
  9. the "genbank" or "embl" format names to parse GenBank or EMBL files into
  10. SeqRecord and SeqFeature objects (see the Biopython tutorial for details).
  11. Using Bio.GenBank directly to parse GenBank files is only useful if you want
  12. to obtain GenBank-specific Record objects, which is a much closer
  13. representation to the raw file contents than the SeqRecord alternative from
  14. the FeatureParser (used in Bio.SeqIO).
  15. To use the Bio.GenBank parser, there are two helper functions:
  16. read Parse a handle containing a single GenBank record
  17. as Bio.GenBank specific Record objects.
  18. parse Iterate over a handle containing multiple GenBank
  19. records as Bio.GenBank specific Record objects.
  20. The following internal classes are not intended for direct use and may
  21. be deprecated in a future release.
  22. Classes:
  23. Iterator Iterate through a file of GenBank entries
  24. ErrorFeatureParser Catch errors caused during parsing.
  25. FeatureParser Parse GenBank data in SeqRecord and SeqFeature objects.
  26. RecordParser Parse GenBank data into a Record object.
  27. Exceptions:
  28. ParserFailureError Exception indicating a failure in the parser (ie.
  29. scanner or consumer)
  30. LocationParserError Exception indicating a problem with the spark based
  31. location parser.
  32. """
  33. from __future__ import print_function
  34. import re
  35. import sys # for checking if Python 2
  36. # other Biopython stuff
  37. from Bio import SeqFeature
  38. # other Bio.GenBank stuff
  39. from .utils import FeatureValueCleaner
  40. from .Scanner import GenBankScanner
# Constants used to parse GenBank header lines: header keywords occupy the
# first GENBANK_INDENT columns, so continuation lines are padded with
# GENBANK_SPACER.
GENBANK_INDENT = 12
GENBANK_SPACER = " " * GENBANK_INDENT

# Constants for parsing GenBank feature table lines: feature keys start at
# column FEATURE_KEY_INDENT, qualifier text at FEATURE_QUALIFIER_INDENT.
FEATURE_KEY_INDENT = 5
FEATURE_QUALIFIER_INDENT = 21
FEATURE_KEY_SPACER = " " * FEATURE_KEY_INDENT
FEATURE_QUALIFIER_SPACER = " " * FEATURE_QUALIFIER_INDENT
  49. #Regular expressions for location parsing
  50. _solo_location = r"[<>]?\d+"
  51. _pair_location = r"[<>]?\d+\.\.[<>]?\d+"
  52. _between_location = r"\d+\^\d+"
  53. _within_position = r"\(\d+\.\d+\)"
  54. _re_within_position = re.compile(_within_position)
  55. _within_location = r"([<>]?\d+|%s)\.\.([<>]?\d+|%s)" \
  56. % (_within_position, _within_position)
  57. assert _re_within_position.match("(3.9)")
  58. assert re.compile(_within_location).match("(3.9)..10")
  59. assert re.compile(_within_location).match("26..(30.33)")
  60. assert re.compile(_within_location).match("(13.19)..(20.28)")
  61. _oneof_position = r"one\-of\(\d+(,\d+)+\)"
  62. _re_oneof_position = re.compile(_oneof_position)
  63. _oneof_location = r"([<>]?\d+|%s)\.\.([<>]?\d+|%s)" \
  64. % (_oneof_position, _oneof_position)
  65. assert _re_oneof_position.match("one-of(6,9)")
  66. assert re.compile(_oneof_location).match("one-of(6,9)..101")
  67. assert re.compile(_oneof_location).match("one-of(6,9)..one-of(101,104)")
  68. assert re.compile(_oneof_location).match("6..one-of(101,104)")
  69. assert not _re_oneof_position.match("one-of(3)")
  70. assert _re_oneof_position.match("one-of(3,6)")
  71. assert _re_oneof_position.match("one-of(3,6,9)")
  72. _simple_location = r"\d+\.\.\d+"
  73. _re_simple_location = re.compile(r"^%s$" % _simple_location)
  74. _re_simple_compound = re.compile(r"^(join|order|bond)\(%s(,%s)*\)$"
  75. % (_simple_location, _simple_location))
  76. _complex_location = r"([a-zA-z][a-zA-Z0-9_]*(\.[a-zA-Z0-9]+)?\:)?(%s|%s|%s|%s|%s)" \
  77. % (_pair_location, _solo_location, _between_location,
  78. _within_location, _oneof_location)
  79. _re_complex_location = re.compile(r"^%s$" % _complex_location)
  80. _possibly_complemented_complex_location = r"(%s|complement\(%s\))" \
  81. % (_complex_location, _complex_location)
  82. _re_complex_compound = re.compile(r"^(join|order|bond)\(%s(,%s)*\)$"
  83. % (_possibly_complemented_complex_location,
  84. _possibly_complemented_complex_location))
  85. assert _re_simple_location.match("104..160")
  86. assert not _re_simple_location.match("68451760..68452073^68452074")
  87. assert not _re_simple_location.match("<104..>160")
  88. assert not _re_simple_location.match("104")
  89. assert not _re_simple_location.match("<1")
  90. assert not _re_simple_location.match(">99999")
  91. assert not _re_simple_location.match("join(104..160,320..390,504..579)")
  92. assert not _re_simple_compound.match("bond(12,63)")
  93. assert _re_simple_compound.match("join(104..160,320..390,504..579)")
  94. assert _re_simple_compound.match("order(1..69,1308..1465)")
  95. assert not _re_simple_compound.match("order(1..69,1308..1465,1524)")
  96. assert not _re_simple_compound.match("join(<1..442,992..1228,1524..>1983)")
  97. assert not _re_simple_compound.match("join(<1..181,254..336,422..497,574..>590)")
  98. assert not _re_simple_compound.match("join(1475..1577,2841..2986,3074..3193,3314..3481,4126..>4215)")
  99. assert not _re_simple_compound.match("test(1..69,1308..1465)")
  100. assert not _re_simple_compound.match("complement(1..69)")
  101. assert not _re_simple_compound.match("(1..69)")
  102. assert _re_complex_location.match("(3.9)..10")
  103. assert _re_complex_location.match("26..(30.33)")
  104. assert _re_complex_location.match("(13.19)..(20.28)")
  105. assert _re_complex_location.match("41^42") # between
  106. assert _re_complex_location.match("AL121804:41^42")
  107. assert _re_complex_location.match("AL121804:41..610")
  108. assert _re_complex_location.match("AL121804.2:41..610")
  109. assert _re_complex_location.match("one-of(3,6)..101")
  110. assert _re_complex_compound.match("join(153490..154269,AL121804.2:41..610,AL121804.2:672..1487)")
  111. assert not _re_simple_compound.match("join(153490..154269,AL121804.2:41..610,AL121804.2:672..1487)")
  112. assert _re_complex_compound.match("join(complement(69611..69724),139856..140650)")
  113. #Trans-spliced example from NC_016406, note underscore in reference name:
  114. assert _re_complex_location.match("NC_016402.1:6618..6676")
  115. assert _re_complex_location.match("181647..181905")
  116. assert _re_complex_compound.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
  117. assert not _re_complex_location.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
  118. assert not _re_simple_compound.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
  119. assert not _re_complex_location.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
  120. assert not _re_simple_location.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
  121. _solo_bond = re.compile("bond\(%s\)" % _solo_location)
  122. assert _solo_bond.match("bond(196)")
  123. assert _solo_bond.search("bond(196)")
  124. assert _solo_bond.search("join(bond(284),bond(305),bond(309),bond(305))")
  125. def _pos(pos_str, offset=0):
  126. """Build a Position object (PRIVATE).
  127. For an end position, leave offset as zero (default):
  128. >>> _pos("5")
  129. ExactPosition(5)
  130. For a start position, set offset to minus one (for Python counting):
  131. >>> _pos("5", -1)
  132. ExactPosition(4)
  133. This also covers fuzzy positions:
  134. >>> p = _pos("<5")
  135. >>> p
  136. BeforePosition(5)
  137. >>> print(p)
  138. <5
  139. >>> int(p)
  140. 5
  141. >>> _pos(">5")
  142. AfterPosition(5)
  143. By default assumes an end position, so note the integer behaviour:
  144. >>> p = _pos("one-of(5,8,11)")
  145. >>> p
  146. OneOfPosition(11, choices=[ExactPosition(5), ExactPosition(8), ExactPosition(11)])
  147. >>> print(p)
  148. one-of(5,8,11)
  149. >>> int(p)
  150. 11
  151. >>> _pos("(8.10)")
  152. WithinPosition(10, left=8, right=10)
  153. Fuzzy start positions:
  154. >>> p = _pos("<5", -1)
  155. >>> p
  156. BeforePosition(4)
  157. >>> print(p)
  158. <4
  159. >>> int(p)
  160. 4
  161. Notice how the integer behaviour changes too!
  162. >>> p = _pos("one-of(5,8,11)", -1)
  163. >>> p
  164. OneOfPosition(4, choices=[ExactPosition(4), ExactPosition(7), ExactPosition(10)])
  165. >>> print(p)
  166. one-of(4,7,10)
  167. >>> int(p)
  168. 4
  169. """
  170. if pos_str.startswith("<"):
  171. return SeqFeature.BeforePosition(int(pos_str[1:])+offset)
  172. elif pos_str.startswith(">"):
  173. return SeqFeature.AfterPosition(int(pos_str[1:])+offset)
  174. elif _re_within_position.match(pos_str):
  175. s, e = pos_str[1:-1].split(".")
  176. s = int(s) + offset
  177. e = int(e) + offset
  178. if offset == -1:
  179. default = s
  180. else:
  181. default = e
  182. return SeqFeature.WithinPosition(default, left=s, right=e)
  183. elif _re_oneof_position.match(pos_str):
  184. assert pos_str.startswith("one-of(")
  185. assert pos_str[-1]==")"
  186. parts = [SeqFeature.ExactPosition(int(pos)+offset)
  187. for pos in pos_str[7:-1].split(",")]
  188. if offset == -1:
  189. default = min(int(pos) for pos in parts)
  190. else:
  191. default = max(int(pos) for pos in parts)
  192. return SeqFeature.OneOfPosition(default, choices=parts)
  193. else:
  194. return SeqFeature.ExactPosition(int(pos_str)+offset)
  195. def _loc(loc_str, expected_seq_length, strand):
  196. """FeatureLocation from non-compound non-complement location (PRIVATE).
  197. Simple examples,
  198. >>> _loc("123..456", 1000, +1)
  199. FeatureLocation(ExactPosition(122), ExactPosition(456), strand=1)
  200. >>> _loc("<123..>456", 1000, strand = -1)
  201. FeatureLocation(BeforePosition(122), AfterPosition(456), strand=-1)
  202. A more complex location using within positions,
  203. >>> _loc("(9.10)..(20.25)", 1000, 1)
  204. FeatureLocation(WithinPosition(8, left=8, right=9), WithinPosition(25, left=20, right=25), strand=1)
  205. Notice how that will act as though it has overall start 8 and end 25.
  206. Zero length between feature,
  207. >>> _loc("123^124", 1000, 0)
  208. FeatureLocation(ExactPosition(123), ExactPosition(123), strand=0)
  209. The expected sequence length is needed for a special case, a between
  210. position at the start/end of a circular genome:
  211. >>> _loc("1000^1", 1000, 1)
  212. FeatureLocation(ExactPosition(1000), ExactPosition(1000), strand=1)
  213. Apart from this special case, between positions P^Q must have P+1==Q,
  214. >>> _loc("123^456", 1000, 1)
  215. Traceback (most recent call last):
  216. ...
  217. ValueError: Invalid between location '123^456'
  218. """
  219. try:
  220. s, e = loc_str.split("..")
  221. except ValueError:
  222. assert ".." not in loc_str
  223. if "^" in loc_str:
  224. #A between location like "67^68" (one based counting) is a
  225. #special case (note it has zero length). In python slice
  226. #notation this is 67:67, a zero length slice. See Bug 2622
  227. #Further more, on a circular genome of length N you can have
  228. #a location N^1 meaning the junction at the origin. See Bug 3098.
  229. #NOTE - We can imagine between locations like "2^4", but this
  230. #is just "3". Similarly, "2^5" is just "3..4"
  231. s, e = loc_str.split("^")
  232. if int(s)+1==int(e):
  233. pos = _pos(s)
  234. elif int(s)==expected_seq_length and e=="1":
  235. pos = _pos(s)
  236. else:
  237. raise ValueError("Invalid between location %s" % repr(loc_str))
  238. return SeqFeature.FeatureLocation(pos, pos, strand)
  239. else:
  240. #e.g. "123"
  241. s = loc_str
  242. e = loc_str
  243. return SeqFeature.FeatureLocation(_pos(s, -1), _pos(e), strand)
  244. def _split_compound_loc(compound_loc):
  245. """Split a tricky compound location string (PRIVATE).
  246. >>> list(_split_compound_loc("123..145"))
  247. ['123..145']
  248. >>> list(_split_compound_loc("123..145,200..209"))
  249. ['123..145', '200..209']
  250. >>> list(_split_compound_loc("one-of(200,203)..300"))
  251. ['one-of(200,203)..300']
  252. >>> list(_split_compound_loc("complement(123..145),200..209"))
  253. ['complement(123..145)', '200..209']
  254. >>> list(_split_compound_loc("123..145,one-of(200,203)..209"))
  255. ['123..145', 'one-of(200,203)..209']
  256. >>> list(_split_compound_loc("123..145,one-of(200,203)..one-of(209,211),300"))
  257. ['123..145', 'one-of(200,203)..one-of(209,211)', '300']
  258. >>> list(_split_compound_loc("123..145,complement(one-of(200,203)..one-of(209,211)),300"))
  259. ['123..145', 'complement(one-of(200,203)..one-of(209,211))', '300']
  260. >>> list(_split_compound_loc("123..145,200..one-of(209,211),300"))
  261. ['123..145', '200..one-of(209,211)', '300']
  262. >>> list(_split_compound_loc("123..145,200..one-of(209,211)"))
  263. ['123..145', '200..one-of(209,211)']
  264. >>> list(_split_compound_loc("complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905"))
  265. ['complement(149815..150200)', 'complement(293787..295573)', 'NC_016402.1:6618..6676', '181647..181905']
  266. """
  267. if "one-of(" in compound_loc:
  268. #Hard case
  269. while "," in compound_loc:
  270. assert compound_loc[0] != ","
  271. assert compound_loc[0:2] != ".."
  272. i = compound_loc.find(",")
  273. part = compound_loc[:i]
  274. compound_loc = compound_loc[i:] # includes the comma
  275. while part.count("(") > part.count(")"):
  276. assert "one-of(" in part, (part, compound_loc)
  277. i = compound_loc.find(")")
  278. part += compound_loc[:i+1]
  279. compound_loc = compound_loc[i+1:]
  280. if compound_loc.startswith(".."):
  281. i = compound_loc.find(",")
  282. if i==-1:
  283. part += compound_loc
  284. compound_loc = ""
  285. else:
  286. part += compound_loc[:i]
  287. compound_loc = compound_loc[i:] # includes the comma
  288. while part.count("(") > part.count(")"):
  289. assert part.count("one-of(") == 2
  290. i = compound_loc.find(")")
  291. part += compound_loc[:i+1]
  292. compound_loc = compound_loc[i+1:]
  293. if compound_loc.startswith(","):
  294. compound_loc = compound_loc[1:]
  295. assert part
  296. yield part
  297. if compound_loc:
  298. yield compound_loc
  299. else:
  300. #Easy case
  301. for part in compound_loc.split(","):
  302. yield part
  303. class Iterator(object):
  304. """Iterator interface to move over a file of GenBank entries one at a time (OBSOLETE).
  305. This class is likely to be deprecated in a future release of Biopython.
  306. Please use Bio.SeqIO.parse(..., format="gb") or Bio.GenBank.parse(...)
  307. for SeqRecord and GenBank specific Record objects respectively instead.
  308. """
  309. def __init__(self, handle, parser = None):
  310. """Initialize the iterator.
  311. Arguments:
  312. o handle - A handle with GenBank entries to iterate through.
  313. o parser - An optional parser to pass the entries through before
  314. returning them. If None, then the raw entry will be returned.
  315. """
  316. self.handle = handle
  317. self._parser = parser
  318. def __next__(self):
  319. """Return the next GenBank record from the handle.
  320. Will return None if we ran out of records.
  321. """
  322. if self._parser is None:
  323. lines = []
  324. while True:
  325. line = self.handle.readline()
  326. if not line:
  327. return None # Premature end of file?
  328. lines.append(line)
  329. if line.rstrip() == "//":
  330. break
  331. return "".join(lines)
  332. try:
  333. return self._parser.parse(self.handle)
  334. except StopIteration:
  335. return None
  336. if sys.version_info[0] < 3:
  337. def next(self):
  338. """Python 2 style alias for Python 3 style __next__ method."""
  339. return self.__next__()
  340. def __iter__(self):
  341. return iter(self.__next__, None)
  342. class ParserFailureError(Exception):
  343. """Failure caused by some kind of problem in the parser.
  344. """
  345. pass
  346. class LocationParserError(Exception):
  347. """Could not Properly parse out a location from a GenBank file.
  348. """
  349. pass
  350. class FeatureParser(object):
  351. """Parse GenBank files into Seq + Feature objects (OBSOLETE).
  352. Direct use of this class is discouraged, and may be deprecated in
  353. a future release of Biopython.
  354. Please use Bio.SeqIO.parse(...) or Bio.SeqIO.read(...) instead.
  355. """
  356. def __init__(self, debug_level = 0, use_fuzziness = 1,
  357. feature_cleaner = FeatureValueCleaner()):
  358. """Initialize a GenBank parser and Feature consumer.
  359. Arguments:
  360. o debug_level - An optional argument that species the amount of
  361. debugging information the parser should spit out. By default we have
  362. no debugging info (the fastest way to do things), but if you want
  363. you can set this as high as two and see exactly where a parse fails.
  364. o use_fuzziness - Specify whether or not to use fuzzy representations.
  365. The default is 1 (use fuzziness).
  366. o feature_cleaner - A class which will be used to clean out the
  367. values of features. This class must implement the function
  368. clean_value. GenBank.utils has a "standard" cleaner class, which
  369. is used by default.
  370. """
  371. self._scanner = GenBankScanner(debug_level)
  372. self.use_fuzziness = use_fuzziness
  373. self._cleaner = feature_cleaner
  374. def parse(self, handle):
  375. """Parse the specified handle.
  376. """
  377. self._consumer = _FeatureConsumer(self.use_fuzziness,
  378. self._cleaner)
  379. self._scanner.feed(handle, self._consumer)
  380. return self._consumer.data
  381. class RecordParser(object):
  382. """Parse GenBank files into Record objects (OBSOLETE).
  383. Direct use of this class is discouraged, and may be deprecated in
  384. a future release of Biopython.
  385. Please use the Bio.GenBank.parse(...) or Bio.GenBank.read(...) functions
  386. instead.
  387. """
  388. def __init__(self, debug_level = 0):
  389. """Initialize the parser.
  390. Arguments:
  391. o debug_level - An optional argument that species the amount of
  392. debugging information the parser should spit out. By default we have
  393. no debugging info (the fastest way to do things), but if you want
  394. you can set this as high as two and see exactly where a parse fails.
  395. """
  396. self._scanner = GenBankScanner(debug_level)
  397. def parse(self, handle):
  398. """Parse the specified handle into a GenBank record.
  399. """
  400. self._consumer = _RecordConsumer()
  401. self._scanner.feed(handle, self._consumer)
  402. return self._consumer.data
  403. class _BaseGenBankConsumer(object):
  404. """Abstract GenBank consumer providing useful general functions (PRIVATE).
  405. This just helps to eliminate some duplication in things that most
  406. GenBank consumers want to do.
  407. """
  408. # Special keys in GenBank records that we should remove spaces from
  409. # For instance, \translation keys have values which are proteins and
  410. # should have spaces and newlines removed from them. This class
  411. # attribute gives us more control over specific formatting problems.
  412. remove_space_keys = ["translation"]
  413. def __init__(self):
  414. pass
  415. def _unhandled(self, data):
  416. pass
  417. def __getattr__(self, attr):
  418. return self._unhandled
  419. def _split_keywords(self, keyword_string):
  420. """Split a string of keywords into a nice clean list.
  421. """
  422. # process the keywords into a python list
  423. if keyword_string == "" or keyword_string == ".":
  424. keywords = ""
  425. elif keyword_string[-1] == '.':
  426. keywords = keyword_string[:-1]
  427. else:
  428. keywords = keyword_string
  429. keyword_list = keywords.split(';')
  430. clean_keyword_list = [x.strip() for x in keyword_list]
  431. return clean_keyword_list
  432. def _split_accessions(self, accession_string):
  433. """Split a string of accession numbers into a list.
  434. """
  435. # first replace all line feeds with spaces
  436. # Also, EMBL style accessions are split with ';'
  437. accession = accession_string.replace("\n", " ").replace(";", " ")
  438. return [x.strip() for x in accession.split() if x.strip()]
  439. def _split_taxonomy(self, taxonomy_string):
  440. """Split a string with taxonomy info into a list.
  441. """
  442. if not taxonomy_string or taxonomy_string==".":
  443. #Missing data, no taxonomy
  444. return []
  445. if taxonomy_string[-1] == '.':
  446. tax_info = taxonomy_string[:-1]
  447. else:
  448. tax_info = taxonomy_string
  449. tax_list = tax_info.split(';')
  450. new_tax_list = []
  451. for tax_item in tax_list:
  452. new_items = tax_item.split("\n")
  453. new_tax_list.extend(new_items)
  454. while '' in new_tax_list:
  455. new_tax_list.remove('')
  456. clean_tax_list = [x.strip() for x in new_tax_list]
  457. return clean_tax_list
  458. def _clean_location(self, location_string):
  459. """Clean whitespace out of a location string.
  460. The location parser isn't a fan of whitespace, so we clean it out
  461. before feeding it into the parser.
  462. """
  463. #Originally this imported string.whitespace and did a replace
  464. #via a loop. It's simpler to just split on whitespace and rejoin
  465. #the string - and this avoids importing string too. See Bug 2684.
  466. return ''.join(location_string.split())
  467. def _remove_newlines(self, text):
  468. """Remove any newlines in the passed text, returning the new string.
  469. """
  470. # get rid of newlines in the qualifier value
  471. newlines = ["\n", "\r"]
  472. for ws in newlines:
  473. text = text.replace(ws, "")
  474. return text
  475. def _normalize_spaces(self, text):
  476. """Replace multiple spaces in the passed text with single spaces.
  477. """
  478. # get rid of excessive spaces
  479. return ' '.join(x for x in text.split(" ") if x)
  480. def _remove_spaces(self, text):
  481. """Remove all spaces from the passed text.
  482. """
  483. return text.replace(" ", "")
  484. def _convert_to_python_numbers(self, start, end):
  485. """Convert a start and end range to python notation.
  486. In GenBank, starts and ends are defined in "biological" coordinates,
  487. where 1 is the first base and [i, j] means to include both i and j.
  488. In python, 0 is the first base and [i, j] means to include i, but
  489. not j.
  490. So, to convert "biological" to python coordinates, we need to
  491. subtract 1 from the start, and leave the end and things should
  492. be converted happily.
  493. """
  494. new_start = start - 1
  495. new_end = end
  496. return new_start, new_end
  497. class _FeatureConsumer(_BaseGenBankConsumer):
  498. """Create a SeqRecord object with Features to return (PRIVATE).
  499. Attributes:
  500. o use_fuzziness - specify whether or not to parse with fuzziness in
  501. feature locations.
  502. o feature_cleaner - a class that will be used to provide specialized
  503. cleaning-up of feature values.
  504. """
    def __init__(self, use_fuzziness, feature_cleaner = None):
        """Set up an empty SeqRecord to be populated by scanner events.

        o use_fuzziness - specify whether or not to parse with fuzziness in
        feature locations.
        o feature_cleaner - optional object used to clean feature values;
        None means no cleaning.
        """
        # Function-level import; presumably avoids a circular import
        # between Bio.GenBank and Bio.SeqRecord - TODO confirm.
        from Bio.SeqRecord import SeqRecord
        _BaseGenBankConsumer.__init__(self)
        self.data = SeqRecord(None, id = None)
        self.data.id = None
        self.data.description = ""
        self._use_fuzziness = use_fuzziness
        self._feature_cleaner = feature_cleaner
        self._seq_type = ''        # sequence type string (set by residue_type)
        self._seq_data = []        # accumulated sequence line strings
        self._cur_reference = None  # reference currently being populated
        self._cur_feature = None    # feature currently being populated
        self._expected_size = None  # declared sequence length (set by size)
  518. def locus(self, locus_name):
  519. """Set the locus name is set as the name of the Sequence.
  520. """
  521. self.data.name = locus_name
  522. def size(self, content):
  523. """Record the sequence length."""
  524. self._expected_size = int(content)
    def residue_type(self, type):
        """Record the sequence type so we can choose an appropriate alphabet.

        Note the parameter is named ``type`` (shadowing the builtin); kept
        as-is for backwards compatibility with keyword callers.
        """
        self._seq_type = type.strip()
    def data_file_division(self, division):
        """Store the data file division code as an annotation."""
        self.data.annotations['data_file_division'] = division
    def date(self, submit_date):
        """Store the record's date string as an annotation."""
        self.data.annotations['date'] = submit_date
  533. def definition(self, definition):
  534. """Set the definition as the description of the sequence.
  535. """
  536. if self.data.description:
  537. #Append to any existing description
  538. #e.g. EMBL files with two DE lines.
  539. self.data.description += " " + definition
  540. else:
  541. self.data.description = definition
    def accession(self, acc_num):
        """Set the accession number as the id of the sequence.

        If we have multiple accession numbers, the first one passed is
        used.
        """
        new_acc_nums = self._split_accessions(acc_num)
        # Record ALL accessions in the annotations, without duplicates.
        try:
            # On the off chance there was more than one accession line:
            for acc in new_acc_nums:
                # Prevent repeat entries.
                if acc not in self.data.annotations['accessions']:
                    self.data.annotations['accessions'].append(acc)
        except KeyError:
            # First accession line seen for this record.
            self.data.annotations['accessions'] = new_acc_nums
        # If we haven't set the id information yet, add the first acc num.
        if not self.data.id:
            if len(new_acc_nums) > 0:
                # Use the FIRST accession as the ID, not the first on this
                # line (a later accession line never overwrites the id)!
                self.data.id = self.data.annotations['accessions'][0]
    def wgs(self, content):
        """Store the WGS line content, split on the hyphen, as an annotation."""
        self.data.annotations['wgs'] = content.split('-')
    def add_wgs_scafld(self, content):
        """Append a WGS_SCAFLD line (split on the hyphen) to the annotation list."""
        self.data.annotations.setdefault('wgs_scafld', []).append(content.split('-'))
    def nid(self, content):
        """Store the NID line content as an annotation."""
        self.data.annotations['nid'] = content
    def pid(self, content):
        """Store the PID line content as an annotation."""
        self.data.annotations['pid'] = content
    def version(self, version_id):
        """Set the record id from a versioned accession, e.g. "U49845.1".

        We want to use the versioned accession as the record.id. This comes
        from the VERSION line in GenBank files, or the obsolete SV line in
        EMBL. For the new EMBL files we need both the version suffix from
        the ID line and the accession from the AC line.
        """
        if version_id.count(".")==1 and version_id.split(".")[1].isdigit():
            # Normal case, e.g. "U49845.1": record both the accession and
            # the numeric version suffix via the dedicated handlers.
            self.accession(version_id.split(".")[0])
            self.version_suffix(version_id.split(".")[1])
        elif version_id:
            # For backwards compatibility...
            self.data.id = version_id
  583. def project(self, content):
  584. """Handle the information from the PROJECT line as a list of projects.
  585. e.g.
  586. PROJECT GenomeProject:28471
  587. or:
  588. PROJECT GenomeProject:13543 GenomeProject:99999
  589. This is stored as dbxrefs in the SeqRecord to be consistent with the
  590. projected switch of this line to DBLINK in future GenBank versions.
  591. Note the NCBI plan to replace "GenomeProject:28471" with the shorter
  592. "Project:28471" as part of this transition.
  593. """
  594. content = content.replace("GenomeProject:", "Project:")
  595. self.data.dbxrefs.extend(p for p in content.split() if p)
  596. def dblink(self, content):
  597. """Store DBLINK cross references as dbxrefs in our record object.
  598. This line type is expected to replace the PROJECT line in 2009. e.g.
  599. During transition:
  600. PROJECT GenomeProject:28471
  601. DBLINK Project:28471
  602. Trace Assembly Archive:123456
  603. Once the project line is dropped:
  604. DBLINK Project:28471
  605. Trace Assembly Archive:123456
  606. Note GenomeProject -> Project.
  607. We'll have to see some real examples to be sure, but based on the
  608. above example we can expect one reference per line.
  609. Note that at some point the NCBI have included an extra space, e.g.
  610. DBLINK Project: 28471
  611. """
  612. #During the transition period with both PROJECT and DBLINK lines,
  613. #we don't want to add the same cross reference twice.
  614. while ": " in content:
  615. content = content.replace(": ", ":")
  616. if content.strip() not in self.data.dbxrefs:
  617. self.data.dbxrefs.append(content.strip())
    def version_suffix(self, version):
        """Set the version to overwrite the id.

        Since the version provides the same information as the accession
        number, plus some extra info, we set this as the id if we have
        a version.
        """
        # e.g. GenBank line:
        # VERSION     U49845.1  GI:1293613
        # or the obsolete EMBL line:
        # SV   U49845.1
        # Scanner calls consumer.version("U49845.1")
        # which then calls consumer.version_suffix(1)
        #
        # e.g. EMBL new line:
        # ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
        # Scanner calls consumer.version_suffix(1)
        assert version.isdigit()
        self.data.annotations['sequence_version'] = int(version)
    def db_source(self, content):
        """Store the DBSOURCE line content (trailing whitespace stripped)."""
        self.data.annotations['db_source'] = content.rstrip()
    def gi(self, content):
        """Store the GI number as an annotation."""
        self.data.annotations['gi'] = content
    def keywords(self, content):
        """Record a (possibly continued) KEYWORDS line as a list annotation."""
        if 'keywords' in self.data.annotations:
            # Multi-line keywords, append to list.
            # Note EMBL states "A keyword is never split between lines."
            self.data.annotations['keywords'].extend(self._split_keywords(content))
        else:
            self.data.annotations['keywords'] = self._split_keywords(content)
    def segment(self, content):
        """Store the SEGMENT line content as an annotation."""
        self.data.annotations['segment'] = content
  649. def source(self, content):
  650. #Note that some software (e.g. VectorNTI) may produce an empty
  651. #source (rather than using a dot/period as might be expected).
  652. if content == "":
  653. source_info = ""
  654. elif content[-1] == '.':
  655. source_info = content[:-1]
  656. else:
  657. source_info = content
  658. self.data.annotations['source'] = source_info
    def organism(self, content):
        """Store the ORGANISM line content as an annotation."""
        self.data.annotations['organism'] = content
    def taxonomy(self, content):
        """Records (another line of) the taxonomy lineage."""
        lineage = self._split_taxonomy(content)
        try:
            self.data.annotations['taxonomy'].extend(lineage)
        except KeyError:
            # First taxonomy line seen for this record.
            self.data.annotations['taxonomy'] = lineage
    def reference_num(self, content):
        """Signal the beginning of a new reference object."""
        # If we have a current reference that hasn't been added to
        # the list of references, add it; otherwise this is the first
        # reference, so initialise the annotation list.
        if self._cur_reference is not None:
            self.data.annotations['references'].append(self._cur_reference)
        else:
            self.data.annotations['references'] = []
        self._cur_reference = SeqFeature.Reference()
    def reference_bases(self, content):
        """Attempt to determine the sequence region the reference entails.

        Possible types of information we may have to deal with:
        (bases 1 to 86436)
        (sites)
        (bases 1 to 105654; 110423 to 111122)
        1  (residues 1 to 182)
        """
        # First remove the parentheses or other junk at either end.
        ref_base_info = content[1:-1]
        all_locations = []
        # Parse if we've got 'bases' and 'to'.
        if 'bases' in ref_base_info and 'to' in ref_base_info:
            # Get rid of the beginning 'bases' (5 characters).
            ref_base_info = ref_base_info[5:]
            locations = self._split_reference_locations(ref_base_info)
            all_locations.extend(locations)
        elif 'residues' in ref_base_info and 'to' in ref_base_info:
            residues_start = ref_base_info.find("residues")
            # Get only the information after "residues".
            ref_base_info = ref_base_info[(residues_start + len("residues ")):]
            locations = self._split_reference_locations(ref_base_info)
            all_locations.extend(locations)
        # Make sure if we are not finding information then we have
        # the string 'sites' or the string 'bases'.
        elif (ref_base_info == 'sites' or
              ref_base_info.strip() == 'bases'):
            pass
        # Otherwise raise an error.
        else:
            raise ValueError("Could not parse base info %s in record %s" %
                             (ref_base_info, self.data.id))
        self._cur_reference.location = all_locations
  712. def _split_reference_locations(self, location_string):
  713. """Get reference locations out of a string of reference information
  714. The passed string should be of the form:
  715. 1 to 20; 20 to 100
  716. This splits the information out and returns a list of location objects
  717. based on the reference locations.
  718. """
  719. # split possibly multiple locations using the ';'
  720. all_base_info = location_string.split(';')
  721. new_locations = []
  722. for base_info in all_base_info:
  723. start, end = base_info.split('to')
  724. new_start, new_end = \
  725. self._convert_to_python_numbers(int(start.strip()),
  726. int(end.strip()))
  727. this_location = SeqFeature.FeatureLocation(new_start, new_end)
  728. new_locations.append(this_location)
  729. return new_locations
  730. def authors(self, content):
  731. if self._cur_reference.authors:
  732. self._cur_reference.authors += ' ' + content
  733. else:
  734. self._cur_reference.authors = content
  735. def consrtm(self, content):
  736. if self._cur_reference.consrtm:
  737. self._cur_reference.consrtm += ' ' + content
  738. else:
  739. self._cur_reference.consrtm = content
  740. def title(self, content):
  741. if self._cur_reference is None:
  742. import warnings
  743. from Bio import BiopythonParserWarning
  744. warnings.warn("GenBank TITLE line without REFERENCE line.",
  745. BiopythonParserWarning)
  746. elif self._cur_reference.title:
  747. self._cur_reference.title += ' ' + content
  748. else:
  749. self._cur_reference.title = content
  750. def journal(self, content):
  751. if self._cur_reference.journal:
  752. self._cur_reference.journal += ' ' + content
  753. else:
  754. self._cur_reference.journal = content
  755. def medline_id(self, content):
  756. self._cur_reference.medline_id = content
  757. def pubmed_id(self, content):
  758. self._cur_reference.pubmed_id = content
  759. def remark(self, content):
  760. """Deal with a reference comment."""
  761. if self._cur_reference.comment:
  762. self._cur_reference.comment += ' ' + content
  763. else:
  764. self._cur_reference.comment = content
  765. def comment(self, content):
  766. try:
  767. self.data.annotations['comment'] += "\n" + "\n".join(content)
  768. except KeyError:
  769. self.data.annotations['comment'] = "\n".join(content)
  770. def features_line(self, content):
  771. """Get ready for the feature table when we reach the FEATURE line.
  772. """
  773. self.start_feature_table()
  774. def start_feature_table(self):
  775. """Indicate we've got to the start of the feature table.
  776. """
  777. # make sure we've added on our last reference object
  778. if self._cur_reference is not None:
  779. self.data.annotations['references'].append(self._cur_reference)
  780. self._cur_reference = None
  781. def feature_key(self, content):
  782. # start a new feature
  783. self._cur_feature = SeqFeature.SeqFeature()
  784. self._cur_feature.type = content
  785. self.data.features.append(self._cur_feature)
def location(self, content):
    """Parse out location information from the location string.

    This uses simple Python code with some regular expressions to do the
    parsing, and then translates the results into appropriate objects.

    Sets self._cur_feature.location (and, for compound locations, also
    populates self._cur_feature.sub_features).  Unparseable locations
    leave the location as None and issue a BiopythonParserWarning.
    """
    # clean up newlines and other whitespace inside the location before
    # parsing - locations should have no whitespace whatsoever
    location_line = self._clean_location(content)
    # Older records have junk like replace(266,"c") in the
    # location line. Newer records just replace this with
    # the number 266 and have the information in a more reasonable
    # place. So we'll just grab out the number and feed this to the
    # parser. We shouldn't really be losing any info this way.
    if 'replace' in location_line:
        # [8:comma_pos] skips the 'replace(' prefix and keeps only the
        # position before the comma.
        comma_pos = location_line.find(',')
        location_line = location_line[8:comma_pos]
    cur_feature = self._cur_feature
    #Handle top level complement here for speed
    if location_line.startswith("complement("):
        assert location_line.endswith(")")
        # Strip 'complement(' and the trailing ')'.
        location_line = location_line[11:-1]
        strand = -1
    elif "PROTEIN" in self._seq_type.upper():
        # Proteins have no strand.
        strand = None
    else:
        #Assume nucleotide otherwise feature strand for
        #GenBank files with bad LOCUS lines set to None
        strand = 1
    #Special case handling of the most common cases for speed
    if _re_simple_location.match(location_line):
        #e.g. "123..456" - note the conversion from one-based inclusive
        #GenBank coordinates to Python's zero-based half-open slices.
        s, e = location_line.split("..")
        cur_feature.location = SeqFeature.FeatureLocation(int(s)-1,
                                                          int(e),
                                                          strand)
        return
    if _solo_bond.search(location_line):
        #e.g. bond(196)
        #e.g. join(bond(284),bond(305),bond(309),bond(305))
        import warnings
        from Bio import BiopythonParserWarning
        warnings.warn("Dropping bond qualifier in feature location", BiopythonParserWarning)
        #There ought to be a better way to do this...
        #Replace each bond(N) token with just N, then continue parsing.
        for x in _solo_bond.finditer(location_line):
            x = x.group()
            location_line = location_line.replace(x, x[5:-1])
    if _re_simple_compound.match(location_line):
        #e.g. join(<123..456,480..>500)
        i = location_line.find("(")
        #cur_feature.location_operator = location_line[:i]
        #we can split on the comma because these are simple locations
        sub_features = cur_feature.sub_features
        for part in location_line[i+1:-1].split(","):
            s, e = part.split("..")
            f = SeqFeature.SeqFeature(SeqFeature.FeatureLocation(int(s)-1,
                                                                 int(e),
                                                                 strand),
                    location_operator=cur_feature.location_operator,
                    type=cur_feature.type)
            sub_features.append(f)
        #s = cur_feature.sub_features[0].location.start
        #e = cur_feature.sub_features[-1].location.end
        #cur_feature.location = SeqFeature.FeatureLocation(s,e, strand)
        #TODO - Remove use of sub_features
        if strand == -1:
            #Parts of a complemented join are listed in reverse order in
            #GenBank files, so restore biological order here.
            cur_feature.location = SeqFeature.CompoundLocation([f.location for f in sub_features[::-1]],
                                                               operator=location_line[:i])
        else:
            cur_feature.location = SeqFeature.CompoundLocation([f.location for f in sub_features],
                                                               operator=location_line[:i])
        return
    #Handle the general case with more complex regular expressions
    if _re_complex_location.match(location_line):
        #e.g. "AL121804.2:41..610"
        if ":" in location_line:
            #Cross-reference to another record; keep the accession as ref.
            location_ref, location_line = location_line.split(":")
            cur_feature.location = _loc(location_line, self._expected_size, strand)
            cur_feature.location.ref = location_ref
        else:
            cur_feature.location = _loc(location_line, self._expected_size, strand)
        return
    if _re_complex_compound.match(location_line):
        i = location_line.find("(")
        #cur_feature.location_operator = location_line[:i]
        #Can't split on the comma because of positions like one-of(1,2,3)
        sub_features = cur_feature.sub_features
        for part in _split_compound_loc(location_line[i+1:-1]):
            if part.startswith("complement("):
                assert part[-1]==")"
                part = part[11:-1]
                assert strand != -1, "Double complement?"
                part_strand = -1
            else:
                part_strand = strand
            if ":" in part:
                #Part refers to another record (accession before colon).
                ref, part = part.split(":")
            else:
                ref = None
            try:
                loc = _loc(part, self._expected_size, part_strand)
            except ValueError as err:
                #NOTE(review): debug prints left in before re-raising -
                #consider routing through warnings/logging instead.
                print(location_line)
                print(part)
                raise err
            f = SeqFeature.SeqFeature(location=loc, ref=ref,
                    location_operator=cur_feature.location_operator,
                    type=cur_feature.type)
            sub_features.append(f)
        # Historically a join on the reverse strand has been represented
        # in Biopython with both the parent SeqFeature and its children
        # (the exons for a CDS) all given a strand of -1.  Likewise, for
        # a join feature on the forward strand they all have strand +1.
        # However, we must also consider evil mixed strand examples like
        # this, join(complement(69611..69724),139856..140087,140625..140650)
        #
        # TODO - Remove use of sub_features
        strands = set(sf.strand for sf in sub_features)
        if len(strands)==1:
            strand = sub_features[0].strand
        else:
            strand = None # i.e. mixed strands
        if strand == -1:
            #Reverse the backwards order used in GenBank files
            cur_feature.location = SeqFeature.CompoundLocation([f.location for f in sub_features[::-1]],
                                                               operator=location_line[:i])
        else:
            cur_feature.location = SeqFeature.CompoundLocation([f.location for f in sub_features],
                                                               operator=location_line[:i])
        return
    #Not recognised
    if "order" in location_line and "join" in location_line:
        #See Bug 3197
        msg = 'Combinations of "join" and "order" within the same ' + \
              'location (nested operators) are illegal:\n' + location_line
        raise LocationParserError(msg)
    #This used to be an error....
    cur_feature.location = None
    import warnings
    from Bio import BiopythonParserWarning
    warnings.warn(BiopythonParserWarning("Couldn't parse feature location: %r"
                                         % (location_line)))
  927. def feature_qualifier(self, key, value):
  928. """When we get a qualifier key and its value.
  929. Can receive None, since you can have valueless keys such as /pseudo
  930. """
  931. # Hack to try to preserve historical behaviour of /pseudo etc
  932. if value is None:
  933. # if the key doesn't exist yet, add an empty string
  934. if key not in self._cur_feature.qualifiers:
  935. self._cur_feature.qualifiers[key] = [""]
  936. return
  937. # otherwise just skip this key
  938. return
  939. value = value.replace('"', '')
  940. if self._feature_cleaner is not None:
  941. value = self._feature_cleaner.clean_value(key, value)
  942. # if the qualifier name exists, append the value
  943. if key in self._cur_feature.qualifiers:
  944. self._cur_feature.qualifiers[key].append(value)
  945. # otherwise start a new list of the key with its values
  946. else:
  947. self._cur_feature.qualifiers[key] = [value]
  948. def feature_qualifier_name(self, content_list):
  949. """Use feature_qualifier instead (OBSOLETE)."""
  950. raise NotImplementedError("Use the feature_qualifier method instead.")
  951. def feature_qualifier_description(self, content):
  952. """Use feature_qualifier instead (OBSOLETE)."""
  953. raise NotImplementedError("Use the feature_qualifier method instead.")
  954. def contig_location(self, content):
  955. """Deal with CONTIG information."""
  956. #Historically this was stored as a SeqFeature object, but it was
  957. #stored under record.annotations["contig"] and not under
  958. #record.features with the other SeqFeature objects.
  959. #
  960. #The CONTIG location line can include additional tokens like
  961. #Gap(), Gap(100) or Gap(unk100) which are not used in the feature
  962. #location lines, so storing it using SeqFeature based location
  963. #objects is difficult.
  964. #
  965. #We now store this a string, which means for BioSQL we are now in
  966. #much better agreement with how BioPerl records the CONTIG line
  967. #in the database.
  968. #
  969. #NOTE - This code assumes the scanner will return all the CONTIG
  970. #lines already combined into one long string!
  971. self.data.annotations["contig"] = content
  972. def origin_name(self, content):
  973. pass
  974. def base_count(self, content):
  975. pass
  976. def base_number(self, content):
  977. pass
  978. def sequence(self, content):
  979. """Add up sequence information as we get it.
  980. To try and make things speedier, this puts all of the strings
  981. into a list of strings, and then uses string.join later to put
  982. them together. Supposedly, this is a big time savings
  983. """
  984. assert ' ' not in content
  985. self._seq_data.append(content.upper())
  986. def record_end(self, content):
  987. """Clean up when we've finished the record.
  988. """
  989. from Bio import Alphabet
  990. from Bio.Alphabet import IUPAC
  991. from Bio.Seq import Seq, UnknownSeq
  992. #Try and append the version number to the accession for the full id
  993. if not self.data.id:
  994. assert 'accessions' not in self.data.annotations, \
  995. self.data.annotations['accessions']
  996. self.data.id = self.data.name # Good fall back?
  997. elif self.data.id.count('.') == 0:
  998. try:
  999. self.data.id+='.%i' % self.data.annotations['sequence_version']
  1000. except KeyError:
  1001. pass
  1002. # add the sequence information
  1003. # first, determine the alphabet
  1004. # we default to an generic alphabet if we don't have a
  1005. # seq type or have strange sequence information.
  1006. seq_alphabet = Alphabet.generic_alphabet
  1007. # now set the sequence
  1008. sequence = "".join(self._seq_data)
  1009. if self._expected_size is not None \
  1010. and len(sequence) != 0 \
  1011. and self._expected_size != len(sequence):
  1012. import warnings
  1013. from Bio import BiopythonParserWarning
  1014. warnings.warn("Expected sequence length %i, found %i (%s)."
  1015. % (self._expected_size, len(sequence), self.data.id),
  1016. BiopythonParserWarning)
  1017. if self._seq_type:
  1018. # mRNA is really also DNA, since it is actually cDNA
  1019. if 'DNA' in self._seq_type.upper() or 'MRNA' in self._seq_type.upper():
  1020. seq_alphabet = IUPAC.ambiguous_dna
  1021. # are there ever really RNA sequences in GenBank?
  1022. elif 'RNA' in self._seq_type.upper():
  1023. #Even for data which was from RNA, the sequence string
  1024. #is usually given as DNA (T not U). Bug 2408
  1025. if "T" in sequence and "U" not in sequence:
  1026. seq_alphabet = IUPAC.ambiguous_dna
  1027. else:
  1028. seq_alphabet = IUPAC.ambiguous_rna
  1029. elif 'PROTEIN' in self._seq_type.upper() \
  1030. or self._seq_type == "PRT": # PRT is used in EMBL-bank for patents
  1031. seq_alphabet = IUPAC.protein # or extended protein?
  1032. # work around ugly GenBank records which have circular or
  1033. # linear but no indication of sequence type
  1034. elif self._seq_type in ["circular", "linear", "unspecified"]:
  1035. pass
  1036. # we have a bug if we get here
  1037. else:
  1038. raise ValueError("Could not determine alphabet for seq_type %s"
  1039. % self._seq_type)
  1040. if not sequence and self.__expected_size:
  1041. self.data.seq = UnknownSeq(self._expected_size, seq_alphabet)
  1042. else:
  1043. self.data.seq = Seq(sequence, seq_alphabet)
  1044. class _RecordConsumer(_BaseGenBankConsumer):
  1045. """Create a GenBank Record object from scanner generated information (PRIVATE).
  1046. """
def __init__(self):
    """Initialise a consumer that builds a Bio.GenBank.Record.Record."""
    _BaseGenBankConsumer.__init__(self)
    from . import Record
    # The Record object being populated by the scanner callbacks.
    self.data = Record.Record()
    # Chunks of sequence text, joined into one string at record end.
    self._seq_data = []
    # Parser state: the reference/feature/qualifier currently being
    # built, or None when no such element is open.
    self._cur_reference = None
    self._cur_feature = None
    self._cur_qualifier = None
  1055. def wgs(self, content):
  1056. self.data.wgs = content.split('-')
  1057. def add_wgs_scafld(self, content):
  1058. self.data.wgs_scafld.append(content.split('-'))