
/lib/galaxy/datatypes/tabular.py

https://bitbucket.org/cistrome/cistrome-harvard/
  1. """
  2. Tabular datatype
  3. """
  4. import pkg_resources
  5. pkg_resources.require( "bx-python" )
  6. import gzip
  7. import logging
  8. import os
  9. from cgi import escape
  10. from galaxy import util
  11. from galaxy.datatypes import data
  12. from galaxy.datatypes import metadata
  13. from galaxy.datatypes.checkers import is_gzip
  14. from galaxy.datatypes.metadata import MetadataElement
  15. from galaxy.datatypes.sniff import get_headers, get_test_fname
  16. from galaxy.util.json import to_json_string
  17. import dataproviders
  18. log = logging.getLogger(__name__)
  19. @dataproviders.decorators.has_dataproviders
  20. class Tabular( data.Text ):
  21. """Tab delimited data"""
  22. # All tabular data is chunkable.
  23. CHUNKABLE = True
  24. CHUNK_SIZE = 50000
  25. """Add metadata elements"""
  26. MetadataElement( name="comment_lines", default=0, desc="Number of comment lines", readonly=False, optional=True, no_value=0 )
  27. MetadataElement( name="columns", default=0, desc="Number of columns", readonly=True, visible=False, no_value=0 )
  28. MetadataElement( name="column_types", default=[], desc="Column types", param=metadata.ColumnTypesParameter, readonly=True, visible=False, no_value=[] )
  29. MetadataElement( name="column_names", default=[], desc="Column names", readonly=True, visible=False, optional=True, no_value=[] )
  30. def set_meta( self, dataset, overwrite = True, skip = None, max_data_lines = 100000, max_guess_type_data_lines = None, **kwd ):
  31. """
  32. Tries to determine the number of columns as well as which columns
  33. contain numerical values in the dataset. A skip parameter is used
  34. because various tabular data types reuse this function, and their data
  35. type classes are responsible for determining how many invalid comment
  36. lines should be skipped. Using None for skip will cause skip to be
  37. zero, but the first line will be processed as a header. A
  38. max_data_lines parameter is used because various tabular data types
  39. reuse this function, and their data type classes are responsible for
  40. determining how many data lines should be processed to ensure that the
  41. non-optional metadata parameters are properly set; if used, optional
  42. metadata parameters will be set to None unless the entire file has
  43. already been read. Using None for max_data_lines will process all data
  44. lines.
  45. Items of interest:
  46. 1. We treat 'overwrite' as always True (we always want to set tabular metadata when called).
  47. 2. If a tabular file has no data, it will have one column of type 'str'.
  48. 3. We used to check only the first 100 lines when setting metadata and this class's
  49. set_peek() method read the entire file to determine the number of lines in the file.
  50. Since metadata can now be processed on cluster nodes, we've merged the line count portion
  51. of the set_peek() processing here, and we now check the entire contents of the file.
  52. """
  53. # Store original skip value to check with later
  54. requested_skip = skip
  55. if skip is None:
  56. skip = 0
  57. column_type_set_order = [ 'int', 'float', 'list', 'str' ] #Order to set column types in
  58. default_column_type = column_type_set_order[-1] # Default column type is lowest in list
  59. column_type_compare_order = list( column_type_set_order ) #Order to compare column types
  60. column_type_compare_order.reverse()
  61. def type_overrules_type( column_type1, column_type2 ):
  62. if column_type1 is None or column_type1 == column_type2:
  63. return False
  64. if column_type2 is None:
  65. return True
  66. for column_type in column_type_compare_order:
  67. if column_type1 == column_type:
  68. return True
  69. if column_type2 == column_type:
  70. return False
  71. #neither column type was found in our ordered list, this cannot happen
  72. raise "Tried to compare unknown column types"
  73. def is_int( column_text ):
  74. try:
  75. int( column_text )
  76. return True
  77. except:
  78. return False
  79. def is_float( column_text ):
  80. try:
  81. float( column_text )
  82. return True
  83. except:
  84. if column_text.strip().lower() == 'na':
  85. return True #na is special cased to be a float
  86. return False
  87. def is_list( column_text ):
  88. return "," in column_text
  89. def is_str( column_text ):
  90. #anything, except an empty string, is True
  91. if column_text == "":
  92. return False
  93. return True
  94. is_column_type = {} #Dict to store column type string to checking function
  95. for column_type in column_type_set_order:
  96. is_column_type[column_type] = locals()[ "is_%s" % ( column_type ) ]
  97. def guess_column_type( column_text ):
  98. for column_type in column_type_set_order:
  99. if is_column_type[column_type]( column_text ):
  100. return column_type
  101. return None
  102. data_lines = 0
  103. comment_lines = 0
  104. column_types = []
  105. first_line_column_types = [default_column_type] # default value is one column of type str
  106. if dataset.has_data():
  107. #NOTE: if skip > num_check_lines, we won't detect any metadata, and will use default
  108. dataset_fh = open( dataset.file_name )
  109. i = 0
  110. while True:
  111. line = dataset_fh.readline()
  112. if not line: break
  113. line = line.rstrip( '\r\n' )
  114. if i < skip or not line or line.startswith( '#' ):
  115. # We'll call blank lines comments
  116. comment_lines += 1
  117. else:
  118. data_lines += 1
  119. if max_guess_type_data_lines is None or data_lines <= max_guess_type_data_lines:
  120. fields = line.split( '\t' )
  121. for field_count, field in enumerate( fields ):
  122. if field_count >= len( column_types ): #found a previously unknown column, we append None
  123. column_types.append( None )
  124. column_type = guess_column_type( field )
  125. if type_overrules_type( column_type, column_types[field_count] ):
  126. column_types[field_count] = column_type
  127. if i == 0 and requested_skip is None:
  128. # This is our first line, people seem to like to upload files that have a header line, but do not
  129. # start with '#' (i.e. all column types would then most likely be detected as str). We will assume
  130. # that the first line is always a header (this was previous behavior - it was always skipped). When
  131. # the requested skip is None, we only use the data from the first line if we have no other data for
  132. # a column. This is far from perfect, as
  133. # 1,2,3 1.1 2.2 qwerty
  134. # 0 0 1,2,3
  135. # will be detected as
  136. # "column_types": ["int", "int", "float", "list"]
  137. # instead of
  138. # "column_types": ["list", "float", "float", "str"] *** would seem to be the 'Truth' by manual
  139. # observation that the first line should be included as data. The old method would have detected as
  140. # "column_types": ["int", "int", "str", "list"]
  141. first_line_column_types = column_types
  142. column_types = [ None for col in first_line_column_types ]
  143. if max_data_lines is not None and data_lines >= max_data_lines:
  144. if dataset_fh.tell() != dataset.get_size():
  145. data_lines = None #Clear optional data_lines metadata value
  146. comment_lines = None #Clear optional comment_lines metadata value; additional comment lines could appear below this point
  147. break
  148. i += 1
  149. dataset_fh.close()
  150. #we err on the side of the larger number of columns
  151. #first we pad our column_types by using data from first line
  152. if len( first_line_column_types ) > len( column_types ):
  153. for column_type in first_line_column_types[len( column_types ):]:
  154. column_types.append( column_type )
  155. #Now we fill any unknown (None) column_types with data from first line
  156. for i in range( len( column_types ) ):
  157. if column_types[i] is None:
  158. if len( first_line_column_types ) <= i or first_line_column_types[i] is None:
  159. column_types[i] = default_column_type
  160. else:
  161. column_types[i] = first_line_column_types[i]
  162. # Set the discovered metadata values for the dataset
  163. dataset.metadata.data_lines = data_lines
  164. dataset.metadata.comment_lines = comment_lines
  165. dataset.metadata.column_types = column_types
  166. dataset.metadata.columns = len( column_types )
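# --- Illustrative aside (not part of tabular.py) ----------------------------
# A minimal, standalone sketch of the column-type guessing used in set_meta()
# above: each field is tested as 'int', then 'float', then 'list', then 'str',
# and a column keeps the most general type seen across rows. The header-line
# special casing above is omitted here; all names are hypothetical helpers,
# not Galaxy API.
def guess_type(value):
    try:
        int(value)
        return 'int'
    except ValueError:
        pass
    try:
        float(value)
        return 'float'
    except ValueError:
        if value.strip().lower() == 'na':
            return 'float'   # 'na' is special-cased as float, mirroring is_float() above
    if ',' in value:
        return 'list'
    return 'str' if value else None

def guess_column_types(rows):
    # least general first, matching column_type_set_order above
    order = ['int', 'float', 'list', 'str']
    types = []
    for row in rows:
        for i, field in enumerate(row.split('\t')):
            if i >= len(types):
                types.append(None)
            guess = guess_type(field)
            if guess is not None and (types[i] is None or
                                      order.index(guess) > order.index(types[i])):
                types[i] = guess
    return [t if t is not None else 'str' for t in types]

# guess_column_types(['1\t1.5\ta,b\tfoo', '2\t2\tc,d\tbar']) -> ['int', 'float', 'list', 'str']
# --- end aside ---------------------------------------------------------------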
  167. def make_html_table( self, dataset, **kwargs ):
  168. """Create HTML table, used for displaying peek"""
  169. out = ['<table cellspacing="0" cellpadding="3">']
  170. try:
  171. out.append( self.make_html_peek_header( dataset, **kwargs ) )
  172. out.append( self.make_html_peek_rows( dataset, **kwargs ) )
  173. out.append( '</table>' )
  174. out = "".join( out )
  175. except Exception, exc:
  176. out = "Can't create peek %s" % str( exc )
  177. return out
  178. def make_html_peek_header( self, dataset, skipchars=None, column_names=None, column_number_format='%s', column_parameter_alias=None, **kwargs ):
  179. if skipchars is None:
  180. skipchars = []
  181. if column_names is None:
  182. column_names = []
  183. if column_parameter_alias is None:
  184. column_parameter_alias = {}
  185. out = []
  186. try:
  187. if not column_names and dataset.metadata.column_names:
  188. column_names = dataset.metadata.column_names
  189. columns = dataset.metadata.columns
  190. if columns is None:
  191. columns = dataset.metadata.spec.columns.no_value
  192. column_headers = [None] * columns
  193. # fill in empty headers with data from column_names
  194. for i in range( min( columns, len( column_names ) ) ):
  195. if column_headers[i] is None and column_names[i] is not None:
  196. column_headers[i] = column_names[i]
  197. # fill in empty headers from ColumnParameters set in the metadata
  198. for name, spec in dataset.metadata.spec.items():
  199. if isinstance( spec.param, metadata.ColumnParameter ):
  200. try:
  201. i = int( getattr( dataset.metadata, name ) ) - 1
  202. except:
  203. i = -1
  204. if 0 <= i < columns and column_headers[i] is None:
  205. column_headers[i] = column_parameter_alias.get(name, name)
  206. out.append( '<tr>' )
  207. for i, header in enumerate( column_headers ):
  208. out.append( '<th>' )
  209. if header is None:
  210. out.append( column_number_format % str( i + 1 ) )
  211. else:
  212. out.append( '%s.%s' % ( str( i + 1 ), escape( header ) ) )
  213. out.append( '</th>' )
  214. out.append( '</tr>' )
  215. except Exception, exc:
  216. log.exception( 'make_html_peek_header failed on HDA %s' % dataset.id )
  217. raise Exception, "Can't create peek header %s" % str( exc )
  218. return "".join( out )
  219. def make_html_peek_rows( self, dataset, skipchars=None, **kwargs ):
  220. if skipchars is None:
  221. skipchars = []
  222. out = []
  223. try:
  224. if not dataset.peek:
  225. dataset.set_peek()
  226. columns = dataset.metadata.columns
  227. if columns is None:
  228. columns = dataset.metadata.spec.columns.no_value
  229. for line in dataset.peek.splitlines():
  230. if line.startswith( tuple( skipchars ) ):
  231. out.append( '<tr><td colspan="100%%">%s</td></tr>' % escape( line ) )
  232. elif line:
  233. elems = line.split( '\t' )
  234. # we may have an invalid comment line or invalid data
  235. if len( elems ) != columns:
  236. out.append( '<tr><td colspan="100%%">%s</td></tr>' % escape( line ) )
  237. else:
  238. out.append( '<tr>' )
  239. for elem in elems:
  240. out.append( '<td>%s</td>' % escape( elem ) )
  241. out.append( '</tr>' )
  242. except Exception, exc:
  243. log.exception( 'make_html_peek_rows failed on HDA %s' % dataset.id )
  244. raise Exception, "Can't create peek rows %s" % str( exc )
  245. return "".join( out )
  246. def get_chunk(self, trans, dataset, chunk):
  247. ck_index = int(chunk)
  248. f = open(dataset.file_name)
  249. f.seek(ck_index * self.CHUNK_SIZE)
  250. # If we aren't at the start of the file, seek to next newline. Do this better eventually.
  251. if f.tell() != 0:
  252. cursor = f.read(1)
  253. while cursor and cursor != '\n':
  254. cursor = f.read(1)
  255. ck_data = f.read(self.CHUNK_SIZE)
  256. cursor = f.read(1)
  257. while cursor and ck_data[-1] != '\n':
  258. ck_data += cursor
  259. cursor = f.read(1)
  260. return to_json_string( { 'ck_data': util.unicodify( ck_data ), 'ck_index': ck_index + 1 } )
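# --- Illustrative aside (not part of tabular.py) ----------------------------
# A standalone sketch of the chunking scheme used by get_chunk()/display_data():
# chunk N starts at byte offset N * CHUNK_SIZE, skips forward to the next
# newline (unless at the start of the file), reads CHUNK_SIZE bytes, then
# extends one byte at a time until the chunk ends on a newline. read_chunk()
# is a hypothetical helper, not part of the Galaxy API.
def read_chunk(path, chunk_index, chunk_size=50000):
    f = open(path)
    try:
        f.seek(chunk_index * chunk_size)
        if f.tell() != 0:
            # we may have landed mid-line; discard up to the next newline
            cursor = f.read(1)
            while cursor and cursor != '\n':
                cursor = f.read(1)
        data = f.read(chunk_size)
        cursor = f.read(1)
        while cursor and data and data[-1] != '\n':
            data += cursor
            cursor = f.read(1)
        return data
    finally:
        f.close()

# A caller would request chunks 0, 1, 2, ... until an empty string comes back.
# --- end aside ---------------------------------------------------------------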
  261. def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, chunk=None, **kwd):
  262. preview = util.string_as_bool( preview )
  263. if chunk:
  264. return self.get_chunk(trans, dataset, chunk)
  265. elif to_ext or not preview:
  266. return self._serve_raw(trans, dataset, to_ext)
  267. elif dataset.metadata.columns > 50:
  268. #Fancy tabular display is only suitable for datasets without an incredibly large number of columns.
  269. #We should add a new datatype 'matrix', with its own draw method, suitable for this kind of data.
  270. #For now, default to the old behavior, ugly as it is. Remove this after adding 'matrix'.
  271. max_peek_size = 1000000 # 1 MB
  272. if os.stat( dataset.file_name ).st_size < max_peek_size:
  273. return open( dataset.file_name )
  274. else:
  275. trans.response.set_content_type( "text/html" )
  276. return trans.stream_template_mako( "/dataset/large_file.mako",
  277. truncated_data = open( dataset.file_name ).read(max_peek_size),
  278. data = dataset)
  279. else:
  280. column_names = 'null'
  281. if dataset.metadata.column_names:
  282. column_names = dataset.metadata.column_names
  283. elif hasattr(dataset.datatype, 'column_names'):
  284. column_names = dataset.datatype.column_names
  285. column_types = dataset.metadata.column_types
  286. if not column_types:
  287. column_types = []
  288. column_number = dataset.metadata.columns
  289. if column_number is None:
  290. column_number = 'null'
  291. return trans.fill_template( "/dataset/tabular_chunked.mako",
  292. dataset = dataset,
  293. chunk = self.get_chunk(trans, dataset, 0),
  294. column_number = column_number,
  295. column_names = column_names,
  296. column_types = column_types )
  297. def set_peek( self, dataset, line_count=None, is_multi_byte=False):
  298. super(Tabular, self).set_peek( dataset, line_count=line_count, is_multi_byte=is_multi_byte)
  299. if dataset.metadata.comment_lines:
  300. dataset.blurb = "%s, %s comments" % ( dataset.blurb, util.commaify( str( dataset.metadata.comment_lines ) ) )
  301. def display_peek( self, dataset ):
  302. """Returns formatted html of peek"""
  303. return self.make_html_table( dataset )
  304. def displayable( self, dataset ):
  305. try:
  306. return dataset.has_data() \
  307. and dataset.state == dataset.states.OK \
  308. and dataset.metadata.columns > 0 \
  309. and dataset.metadata.data_lines != 0
  310. except:
  311. return False
  312. def as_gbrowse_display_file( self, dataset, **kwd ):
  313. return open( dataset.file_name )
  314. def as_ucsc_display_file( self, dataset, **kwd ):
  315. return open( dataset.file_name )
  316. # ------------- Dataproviders
  317. @dataproviders.decorators.dataprovider_factory( 'column', dataproviders.column.ColumnarDataProvider.settings )
  318. def column_dataprovider( self, dataset, **settings ):
  319. """Uses column settings that are passed in"""
  320. dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
  321. return dataproviders.column.ColumnarDataProvider( dataset_source, **settings )
  322. @dataproviders.decorators.dataprovider_factory( 'dataset-column',
  323. dataproviders.column.ColumnarDataProvider.settings )
  324. def dataset_column_dataprovider( self, dataset, **settings ):
  325. """Attempts to get column settings from dataset.metadata"""
  326. return dataproviders.dataset.DatasetColumnarDataProvider( dataset, **settings )
  327. @dataproviders.decorators.dataprovider_factory( 'dict', dataproviders.column.DictDataProvider.settings )
  328. def dict_dataprovider( self, dataset, **settings ):
  329. """Uses column settings that are passed in"""
  330. dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
  331. return dataproviders.column.DictDataProvider( dataset_source, **settings )
  332. @dataproviders.decorators.dataprovider_factory( 'dataset-dict', dataproviders.column.DictDataProvider.settings )
  333. def dataset_dict_dataprovider( self, dataset, **settings ):
  334. """Attempts to get column settings from dataset.metadata"""
  335. return dataproviders.dataset.DatasetDictDataProvider( dataset, **settings )
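# --- Illustrative aside (not part of tabular.py) ----------------------------
# The 'column' and 'dict' dataproviders above wrap Galaxy's ColumnarDataProvider
# and DictDataProvider. A rough standalone sketch of the underlying idea, using
# hypothetical helper names (the real providers accept many more settings):
def iter_columns(lines, indeces=None, column_types=None, comment_char='#'):
    # 'indeces' follows the spelling used by the providers in this file
    casters = {'int': int, 'float': float, 'str': str}
    for line in lines:
        line = line.rstrip('\r\n')
        if not line or line.startswith(comment_char):
            continue
        fields = line.split('\t')
        if indeces is not None:
            fields = [fields[i] for i in indeces]
        if column_types is not None:
            fields = [casters.get(t, str)(f) for t, f in zip(column_types, fields)]
        yield fields

# e.g., for a hypothetical data.tsv:
# iter_columns(open('data.tsv'), indeces=[0, 2], column_types=['str', 'int'])
# yields one [name, count]-style list per data line, roughly what the
# 'column' provider returns.
# --- end aside ---------------------------------------------------------------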
  336. class Taxonomy( Tabular ):
  337. def __init__(self, **kwd):
  338. """Initialize taxonomy datatype"""
  339. Tabular.__init__( self, **kwd )
  340. self.column_names = ['Name', 'TaxId', 'Root', 'Superkingdom', 'Kingdom', 'Subkingdom',
  341. 'Superphylum', 'Phylum', 'Subphylum', 'Superclass', 'Class', 'Subclass',
  342. 'Superorder', 'Order', 'Suborder', 'Superfamily', 'Family', 'Subfamily',
  343. 'Tribe', 'Subtribe', 'Genus', 'Subgenus', 'Species', 'Subspecies'
  344. ]
  345. def display_peek( self, dataset ):
  346. """Returns formated html of peek"""
  347. return Tabular.make_html_table( self, dataset, column_names=self.column_names )
  348. @dataproviders.decorators.has_dataproviders
  349. class Sam( Tabular ):
  350. file_ext = 'sam'
  351. track_type = "ReadTrack"
  352. data_sources = { "data": "bam", "index": "bigwig" }
  353. def __init__(self, **kwd):
  354. """Initialize taxonomy datatype"""
  355. Tabular.__init__( self, **kwd )
  356. self.column_names = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR',
  357. 'MRNM', 'MPOS', 'ISIZE', 'SEQ', 'QUAL', 'OPT'
  358. ]
  359. def display_peek( self, dataset ):
  360. """Returns formated html of peek"""
  361. return Tabular.make_html_table( self, dataset, column_names=self.column_names )
  362. def sniff( self, filename ):
  363. """
  364. Determines whether the file is in SAM format
  365. A file in SAM format consists of lines of tab-separated data.
  366. The following header line may be the first line::
  367. @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL
  368. or
  369. @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL OPT
  370. Data in the OPT column is optional and can consist of tab-separated data
  371. For complete details see http://samtools.sourceforge.net/SAM1.pdf
  372. Rules for sniffing as True::
  373. There must be 11 or more columns of data on each line
  374. Columns 2 (FLAG), 4 (POS), 5 (MAPQ), 8 (MPOS), and 9 (ISIZE) must be numbers (9 can be negative)
  375. We only check that the first 5 alignments (at most) are correctly formatted.
  376. >>> fname = get_test_fname( 'sequence.maf' )
  377. >>> Sam().sniff( fname )
  378. False
  379. >>> fname = get_test_fname( '1.sam' )
  380. >>> Sam().sniff( fname )
  381. True
  382. """
  383. try:
  384. fh = open( filename )
  385. count = 0
  386. while True:
  387. line = fh.readline()
  388. line = line.strip()
  389. if not line:
  390. break #EOF
  391. if line:
  392. if line[0] != '@':
  393. linePieces = line.split('\t')
  394. if len(linePieces) < 11:
  395. return False
  396. try:
  397. check = int(linePieces[1])
  398. check = int(linePieces[3])
  399. check = int(linePieces[4])
  400. check = int(linePieces[7])
  401. check = int(linePieces[8])
  402. except ValueError:
  403. return False
  404. count += 1
  405. if count == 5:
  406. return True
  407. fh.close()
  408. if count < 5 and count > 0:
  409. return True
  410. except:
  411. pass
  412. return False
  413. def set_meta( self, dataset, overwrite = True, skip = None, max_data_lines = 5, **kwd ):
  414. if dataset.has_data():
  415. dataset_fh = open( dataset.file_name )
  416. comment_lines = 0
  417. if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
  418. # If the dataset is larger than optional_metadata, just count comment lines.
  419. for i, l in enumerate(dataset_fh):
  420. if l.startswith('@'):
  421. comment_lines += 1
  422. else:
  423. # No more comments, and the file is too big to look at the whole thing. Give up.
  424. dataset.metadata.data_lines = None
  425. break
  426. else:
  427. # Otherwise, read the whole thing and set num data lines.
  428. for i, l in enumerate(dataset_fh):
  429. if l.startswith('@'):
  430. comment_lines += 1
  431. dataset.metadata.data_lines = i + 1 - comment_lines
  432. dataset_fh.close()
  433. dataset.metadata.comment_lines = comment_lines
  434. dataset.metadata.columns = 12
  435. dataset.metadata.column_types = ['str', 'int', 'str', 'int', 'int', 'str', 'str', 'int', 'int', 'str', 'str', 'str']
  436. def merge( split_files, output_file):
  437. """
  438. Multiple SAM files may each have headers. Since the headers should all be the same, remove
  439. the headers from files 1-n, keeping them in the first file only
  440. """
  441. cmd = 'mv %s %s' % ( split_files[0], output_file )
  442. result = os.system(cmd)
  443. if result != 0:
  444. raise Exception('Result %s from %s' % (result, cmd))
  445. if len(split_files) > 1:
  446. cmd = 'egrep -v "^@" %s >> %s' % ( ' '.join(split_files[1:]), output_file )
  447. result = os.system(cmd)
  448. if result != 0:
  449. raise Exception('Result %s from %s' % (result, cmd))
  450. merge = staticmethod(merge)
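# --- Illustrative aside (not part of tabular.py) ----------------------------
# Sam.merge() above shells out to mv/egrep. The same header-stripping merge
# sketched in pure Python (a hypothetical helper, not the Galaxy
# implementation): keep the first file verbatim, including its '@' header
# lines, then append only the non-header lines from the remaining files.
import shutil

def merge_sam(split_files, output_file):
    shutil.move(split_files[0], output_file)
    with open(output_file, 'a') as out:
        for path in split_files[1:]:
            for line in open(path):
                if not line.startswith('@'):
                    out.write(line)
# --- end aside ---------------------------------------------------------------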
  451. # ------------- Dataproviders
  452. # sam does not use '#' to indicate comments/headers - we need to strip out those headers from the std. providers
  453. #TODO:?? seems like there should be an easier way to do this - metadata.comment_char?
  454. @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.line.FilteredLineDataProvider.settings )
  455. def line_dataprovider( self, dataset, **settings ):
  456. settings[ 'comment_char' ] = '@'
  457. return super( Sam, self ).line_dataprovider( dataset, **settings )
  458. @dataproviders.decorators.dataprovider_factory( 'regex-line', dataproviders.line.RegexLineDataProvider.settings )
  459. def regex_line_dataprovider( self, dataset, **settings ):
  460. settings[ 'comment_char' ] = '@'
  461. return super( Sam, self ).regex_line_dataprovider( dataset, **settings )
  462. @dataproviders.decorators.dataprovider_factory( 'column', dataproviders.column.ColumnarDataProvider.settings )
  463. def column_dataprovider( self, dataset, **settings ):
  464. settings[ 'comment_char' ] = '@'
  465. return super( Sam, self ).column_dataprovider( dataset, **settings )
  466. @dataproviders.decorators.dataprovider_factory( 'dataset-column',
  467. dataproviders.column.ColumnarDataProvider.settings )
  468. def dataset_column_dataprovider( self, dataset, **settings ):
  469. settings[ 'comment_char' ] = '@'
  470. return super( Sam, self ).dataset_column_dataprovider( dataset, **settings )
  471. @dataproviders.decorators.dataprovider_factory( 'dict', dataproviders.column.DictDataProvider.settings )
  472. def dict_dataprovider( self, dataset, **settings ):
  473. settings[ 'comment_char' ] = '@'
  474. return super( Sam, self ).dict_dataprovider( dataset, **settings )
  475. @dataproviders.decorators.dataprovider_factory( 'dataset-dict', dataproviders.column.DictDataProvider.settings )
  476. def dataset_dict_dataprovider( self, dataset, **settings ):
  477. settings[ 'comment_char' ] = '@'
  478. return super( Sam, self ).dataset_dict_dataprovider( dataset, **settings )
  479. @dataproviders.decorators.dataprovider_factory( 'header', dataproviders.line.RegexLineDataProvider.settings )
  480. def header_dataprovider( self, dataset, **settings ):
  481. dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
  482. headers_source = dataproviders.line.RegexLineDataProvider( dataset_source, regex_list=[ '^@' ] )
  483. return dataproviders.line.RegexLineDataProvider( headers_source, **settings )
  484. @dataproviders.decorators.dataprovider_factory( 'id-seq-qual', dict_dataprovider.settings )
  485. def id_seq_qual_dataprovider( self, dataset, **settings ):
  486. # provided as an example of a specified column dict (w/o metadata)
  487. settings[ 'indeces' ] = [ 0, 9, 10 ]
  488. settings[ 'column_names' ] = [ 'id', 'seq', 'qual' ]
  489. return self.dict_dataprovider( dataset, **settings )
  490. @dataproviders.decorators.dataprovider_factory( 'genomic-region',
  491. dataproviders.dataset.GenomicRegionDataProvider.settings )
  492. def genomic_region_dataprovider( self, dataset, **settings ):
  493. settings[ 'comment_char' ] = '@'
  494. return dataproviders.dataset.GenomicRegionDataProvider( dataset, 2, 3, 3, **settings )
  495. @dataproviders.decorators.dataprovider_factory( 'genomic-region-dict',
  496. dataproviders.dataset.GenomicRegionDataProvider.settings )
  497. def genomic_region_dict_dataprovider( self, dataset, **settings ):
  498. settings[ 'comment_char' ] = '@'
  499. return dataproviders.dataset.GenomicRegionDataProvider( dataset, 2, 3, 3, True, **settings )
  500. #@dataproviders.decorators.dataprovider_factory( 'samtools' )
  501. #def samtools_dataprovider( self, dataset, **settings ):
  502. # dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
  503. # return dataproviders.dataset.SamtoolsDataProvider( dataset_source, **settings )
  504. @dataproviders.decorators.has_dataproviders
  505. class Pileup( Tabular ):
  506. """Tab delimited data in pileup (6- or 10-column) format"""
  507. file_ext = "pileup"
  508. line_class = "genomic coordinate"
  509. data_sources = { "data": "tabix" }
  510. """Add metadata elements"""
  511. MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
  512. MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
  513. MetadataElement( name="endCol", default=2, desc="End column", param=metadata.ColumnParameter )
  514. MetadataElement( name="baseCol", default=3, desc="Reference base column", param=metadata.ColumnParameter )
  515. def init_meta( self, dataset, copy_from=None ):
  516. Tabular.init_meta( self, dataset, copy_from=copy_from )
  517. def display_peek( self, dataset ):
  518. """Returns formated html of peek"""
  519. return Tabular.make_html_table( self, dataset, column_parameter_alias={'chromCol':'Chrom', 'startCol':'Start', 'baseCol':'Base'} )
  520. def repair_methods( self, dataset ):
  521. """Return options for removing errors along with a description"""
  522. return [ ("lines", "Remove erroneous lines") ]
  523. def sniff( self, filename ):
  524. """
  525. Checks for 'pileup-ness'
  526. There are two main types of pileup: 6-column and 10-column. For both,
  527. the first three and last two columns are the same. We only check the
  528. first three to allow for some personalization of the format.
  529. >>> fname = get_test_fname( 'interval.interval' )
  530. >>> Pileup().sniff( fname )
  531. False
  532. >>> fname = get_test_fname( '6col.pileup' )
  533. >>> Pileup().sniff( fname )
  534. True
  535. >>> fname = get_test_fname( '10col.pileup' )
  536. >>> Pileup().sniff( fname )
  537. True
  538. """
  539. headers = get_headers( filename, '\t' )
  540. try:
  541. for hdr in headers:
  542. if hdr and not hdr[0].startswith( '#' ):
  543. if len( hdr ) < 3:
  544. return False
  545. try:
  546. # chrom start in column 1 (with 0-based columns)
  547. # and reference base is in column 2
  548. check = int( hdr[1] )
  549. assert hdr[2] in [ 'A', 'C', 'G', 'T', 'N', 'a', 'c', 'g', 't', 'n' ]
  550. except:
  551. return False
  552. return True
  553. except:
  554. return False
  555. # ------------- Dataproviders
  556. @dataproviders.decorators.dataprovider_factory( 'genomic-region',
  557. dataproviders.dataset.GenomicRegionDataProvider.settings )
  558. def genomic_region_dataprovider( self, dataset, **settings ):
  559. return dataproviders.dataset.GenomicRegionDataProvider( dataset, **settings )
  560. @dataproviders.decorators.dataprovider_factory( 'genomic-region-dict',
  561. dataproviders.dataset.GenomicRegionDataProvider.settings )
  562. def genomic_region_dict_dataprovider( self, dataset, **settings ):
  563. settings[ 'named_columns' ] = True
  564. return self.genomic_region_dataprovider( dataset, **settings )
  565. @dataproviders.decorators.has_dataproviders
  566. class Vcf( Tabular ):
  567. """ Variant Call Format for describing SNPs and other simple genome variations. """
  568. track_type = "VariantTrack"
  569. data_sources = { "data": "tabix", "index": "bigwig" }
  570. file_ext = 'vcf'
  571. column_names = [ 'Chrom', 'Pos', 'ID', 'Ref', 'Alt', 'Qual', 'Filter', 'Info', 'Format', 'data' ]
  572. MetadataElement( name="columns", default=10, desc="Number of columns", readonly=True, visible=False )
  573. MetadataElement( name="column_types", default=['str','int','str','str','str','int','str','list','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
  574. MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[5], param=metadata.ColumnParameter, optional=True, multiple=True, visible=False )
  575. MetadataElement( name="sample_names", default=[], desc="Sample names", readonly=True, visible=False, optional=True, no_value=[] )
  576. def sniff( self, filename ):
  577. headers = get_headers( filename, '\n', count=1 )
  578. return headers[0][0].startswith("##fileformat=VCF")
  579. def display_peek( self, dataset ):
  580. """Returns formated html of peek"""
  581. return Tabular.make_html_table( self, dataset, column_names=self.column_names )
  582. def set_meta( self, dataset, **kwd ):
  583. Tabular.set_meta( self, dataset, **kwd )
  584. source = open( dataset.file_name )
  585. # Skip comments.
  586. line = None
  587. for line in source:
  588. if not line.startswith( '##' ):
  589. break
  590. if line and line.startswith( '#' ):
  591. # Found header line, get sample names.
  592. dataset.metadata.sample_names = line.split()[ 9: ]
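# --- Illustrative aside (not part of tabular.py) ----------------------------
# Vcf.set_meta() above pulls sample names from the single '#CHROM ...' header
# line: the first nine columns are fixed (CHROM through FORMAT), and every
# column from the tenth onward is a sample name. A tiny standalone example
# using header content taken from the VCF specification:
header = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tNA00001\tNA00002"
sample_names = header.split()[9:]
# sample_names == ['NA00001', 'NA00002']
# --- end aside ---------------------------------------------------------------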
  593. # ------------- Dataproviders
  594. @dataproviders.decorators.dataprovider_factory( 'genomic-region',
  595. dataproviders.dataset.GenomicRegionDataProvider.settings )
  596. def genomic_region_dataprovider( self, dataset, **settings ):
  597. return dataproviders.dataset.GenomicRegionDataProvider( dataset, 0, 1, 1, **settings )
  598. @dataproviders.decorators.dataprovider_factory( 'genomic-region-dict',
  599. dataproviders.dataset.GenomicRegionDataProvider.settings )
  600. def genomic_region_dict_dataprovider( self, dataset, **settings ):
  601. settings[ 'named_columns' ] = True
  602. return self.genomic_region_dataprovider( dataset, **settings )
  603. class Eland( Tabular ):
  604. """Support for the export.txt.gz file used by Illumina's ELANDv2e aligner"""
  605. file_ext = '_export.txt.gz'
  606. MetadataElement( name="columns", default=0, desc="Number of columns", readonly=True, visible=False )
  607. MetadataElement( name="column_types", default=[], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False, no_value=[] )
  608. MetadataElement( name="comment_lines", default=0, desc="Number of comments", readonly=True, visible=False )
  609. MetadataElement( name="tiles", default=[], param=metadata.ListParameter, desc="Set of tiles", readonly=True, visible=False, no_value=[] )
  610. MetadataElement( name="reads", default=[], param=metadata.ListParameter, desc="Set of reads", readonly=True, visible=False, no_value=[] )
  611. MetadataElement( name="lanes", default=[], param=metadata.ListParameter, desc="Set of lanes", readonly=True, visible=False, no_value=[] )
  612. MetadataElement( name="barcodes", default=[], param=metadata.ListParameter, desc="Set of barcodes", readonly=True, visible=False, no_value=[] )
  613. def __init__(self, **kwd):
  614. """Initialize taxonomy datatype"""
  615. Tabular.__init__( self, **kwd )
  616. self.column_names = ['MACHINE', 'RUN_NO', 'LANE', 'TILE', 'X', 'Y',
  617. 'INDEX', 'READ_NO', 'SEQ', 'QUAL', 'CHROM', 'CONTIG',
  618. 'POSITION', 'STRAND', 'DESC', 'SRAS', 'PRAS', 'PART_CHROM',
  619. 'PART_CONTIG', 'PART_OFFSET', 'PART_STRAND', 'FILT'
  620. ]
  621. def make_html_table( self, dataset, skipchars=None ):
  622. """Create HTML table, used for displaying peek"""
  623. if skipchars is None:
  624. skipchars = []
  625. out = ['<table cellspacing="0" cellpadding="3">']
  626. try:
  627. # Generate column header
  628. out.append( '<tr>' )
  629. for i, name in enumerate( self.column_names ):
  630. out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
  631. # This data type requires at least 11 columns in the data
  632. if dataset.metadata.columns - len( self.column_names ) > 0:
  633. for i in range( len( self.column_names ), dataset.metadata.columns ):
  634. out.append( '<th>%s</th>' % str( i+1 ) )
  635. out.append( '</tr>' )
  636. out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
  637. out.append( '</table>' )
  638. out = "".join( out )
  639. except Exception, exc:
  640. out = "Can't create peek %s" % exc
  641. return out
  642. def sniff( self, filename ):
  643. """
  644. Determines whether the file is in ELAND export format
  645. A file in ELAND export format consists of lines of tab-separated data.
  646. There is no header.
  647. Rules for sniffing as True::
  648. - There must be 22 columns on each line
  649. - LANE, TILE, X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, STRAND, FILT must be correct
  650. - We only check that the first 5 alignments (at most) are correctly formatted.
  651. """
  652. try:
  653. compress = is_gzip(filename)
  654. if compress:
  655. fh = gzip.GzipFile(filename, 'r')
  656. else:
  657. fh = open( filename )
  658. count = 0
  659. while True:
  660. line = fh.readline()
  661. line = line.strip()
  662. if not line:
  663. break #EOF
  664. if line:
  665. linePieces = line.split('\t')
  666. if len(linePieces) != 22:
  667. return False
  668. try:
  669. if long(linePieces[1]) < 0:
  670. raise Exception('Out of range')
  671. if long(linePieces[2]) < 0:
  672. raise Exception('Out of range')
  673. if long(linePieces[3]) < 0:
  674. raise Exception('Out of range')
  675. check = int(linePieces[4])
  676. check = int(linePieces[5])
  677. # can get a lot more specific
  678. except ValueError:
  679. fh.close()
  680. return False
  681. count += 1
  682. if count == 5:
  683. break
  684. if count > 0:
  685. fh.close()
  686. return True
  687. except:
  688. pass
  689. fh.close()
  690. return False
  691. def set_meta( self, dataset, overwrite = True, skip = None, max_data_lines = 5, **kwd ):
  692. if dataset.has_data():
  693. compress = is_gzip(dataset.file_name)
  694. if compress:
  695. dataset_fh = gzip.GzipFile(dataset.file_name, 'r')
  696. else:
  697. dataset_fh = open( dataset.file_name )
  698. lanes = {}
  699. tiles = {}
  700. barcodes = {}
  701. reads = {}
  702. # # Should always read the entire file (until we devise a more clever way to pass metadata on)
  703. #if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
  704. # # If the dataset is larger than optional_metadata, just count comment lines.
  705. # dataset.metadata.data_lines = None
  706. #else:
  707. # # Otherwise, read the whole thing and set num data lines.
  708. for i, line in enumerate(dataset_fh):
  709. if line:
  710. linePieces = line.split('\t')
  711. if len(linePieces) != 22:
  712. raise Exception('%s:%d:Corrupt line!' % (dataset.file_name,i))
  713. lanes[linePieces[2]]=1
  714. tiles[linePieces[3]]=1
  715. barcodes[linePieces[6]]=1
  716. reads[linePieces[7]]=1
  717. pass
  718. dataset.metadata.data_lines = i + 1
  719. dataset_fh.close()
  720. dataset.metadata.comment_lines = 0
  721. dataset.metadata.columns = 21
  722. dataset.metadata.column_types = ['str', 'int', 'int', 'int', 'int', 'int', 'str', 'int', 'str', 'str', 'str', 'str', 'str', 'str', 'str', 'str', 'str', 'str', 'str', 'str', 'str']
  723. dataset.metadata.lanes = lanes.keys()
  724. dataset.metadata.tiles = ["%04d" % int(t) for t in tiles.keys()]
  725. dataset.metadata.barcodes = filter(lambda x: x != '0', barcodes.keys()) + ['NoIndex' for x in barcodes.keys() if x == '0']
  726. dataset.metadata.reads = reads.keys()
  727. class ElandMulti( Tabular ):
  728. file_ext = 'elandmulti'
  729. def sniff( self, filename ):
  730. return False
  731. class FeatureLocationIndex( Tabular ):
  732. """
  733. An index that stores feature locations in tabular format.
  734. """
  735. file_ext='fli'
  736. MetadataElement( name="columns", default=2, desc="Number of columns", readonly=True, visible=False )
  737. MetadataElement( name="column_types", default=['str', 'str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False, no_value=[] )