
/lib/galaxy/datatypes/sniff.py

https://bitbucket.org/cistrome/cistrome-harvard/
  1. """
  2. File format detector
  3. """
  4. import gzip
  5. import logging
  6. import os
  7. import re
  8. import registry
  9. import shutil
  10. import sys
  11. import tempfile
  12. import zipfile
  13. import csv
  14. from binary import Binary
  15. from encodings import search_function as encodings_search_function
  16. from galaxy import util
  17. from galaxy.datatypes.checkers import check_binary, check_html, is_gzip
  18. from galaxy.datatypes.binary import Binary
  19. log = logging.getLogger(__name__)
def get_test_fname(fname):
    """Returns test data filename"""
    path, name = os.path.split(__file__)
    full_path = os.path.join(path, 'test', fname)
    return full_path
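# get_test_fname() resolves names against the 'test' directory that ships next to
# this module, so doctests can refer to fixtures by bare filename.  A minimal
# sketch (assumes the fixture '1.bed' exists under lib/galaxy/datatypes/test/):
#
#   fname = get_test_fname( '1.bed' )
#   assert os.path.exists( fname )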
def stream_to_open_named_file( stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict' ):
    """Writes a stream to the provided file descriptor, returns the file's name and bool( is_multi_byte ). Closes the file descriptor."""
    # The signature and behavior are somewhat odd, due to backwards compatibility, but this can/should be done better.
    CHUNK_SIZE = 1048576
    data_checked = False
    is_compressed = False
    is_binary = False
    is_multi_byte = False
    if not target_encoding or not encodings_search_function( target_encoding ):
        target_encoding = util.DEFAULT_ENCODING  # utf-8
    if not source_encoding:
        source_encoding = util.DEFAULT_ENCODING  # sys.getdefaultencoding() would mimic old behavior (defaults to ascii)
    while True:
        chunk = stream.read( CHUNK_SIZE )
        if not chunk:
            break
        if not data_checked:
            # See if we're uploading a compressed file
            if zipfile.is_zipfile( filename ):
                is_compressed = True
            else:
                try:
                    if unicode( chunk[:2] ) == unicode( util.gzip_magic ):
                        is_compressed = True
                except:
                    pass
            if not is_compressed:
                # See if we have a multi-byte character file
                chars = chunk[:100]
                is_multi_byte = util.is_multi_byte( chars )
                if not is_multi_byte:
                    is_binary = util.is_binary( chunk )
            data_checked = True
        if not is_compressed and not is_binary:
            if not isinstance( chunk, unicode ):
                chunk = chunk.decode( source_encoding, source_error )
            os.write( fd, chunk.encode( target_encoding, target_error ) )
        else:
            # Compressed files must be encoded after they are uncompressed in the upload utility,
            # while binary files should not be encoded at all.
            os.write( fd, chunk )
    os.close( fd )
    return filename, is_multi_byte
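# Example usage (a hedged sketch: 'upload.dat' is an illustrative name, and the
# caller owns the file descriptor, which this function closes):
#
#   fd, path = tempfile.mkstemp()
#   name, is_multi_byte = stream_to_open_named_file( open( 'upload.dat', 'rb' ), fd, path )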
def stream_to_file( stream, suffix='', prefix='', dir=None, text=False, **kwd ):
    """Writes a stream to a temporary file, returns the temporary file's name"""
    fd, temp_name = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=dir, text=text )
    return stream_to_open_named_file( stream, fd, temp_name, **kwd )
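# stream_to_file() is the usual entry point: it creates the temporary file itself
# and delegates to stream_to_open_named_file() above.  Illustrative sketch
# ('upload.dat' is a placeholder name):
#
#   temp_name, is_multi_byte = stream_to_file( open( 'upload.dat', 'rb' ), prefix='upload_' )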
def check_newlines( fname, bytes_to_read=52428800 ):
    """
    Determines if there are any non-POSIX newlines in the first
    bytes_to_read (by default, 50MB) of the file.
    """
    CHUNK_SIZE = 2 ** 20
    f = open( fname, 'r' )
    # Read chunk by chunk (the old `for chunk in f.read(...)` iterated over the
    # characters of a single chunk) until bytes_to_read has been scanned.
    while f.tell() <= bytes_to_read:
        chunk = f.read( CHUNK_SIZE )
        if not chunk:
            break
        if '\r' in chunk:
            f.close()
            return True
    f.close()
    return False
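# Illustrative check (reuses the 'temp.txt' scratch fixture that the doctests
# below also write): a file containing '\r' line endings reports True.
#
#   >>> file( get_test_fname( 'temp.txt' ), 'wt' ).write( "1 2\r3 4" )
#   >>> check_newlines( get_test_fname( 'temp.txt' ) )
#   True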
def convert_newlines( fname, in_place=True ):
    """
    Converts in place a file from universal line endings
    to POSIX line endings.

    >>> fname = get_test_fname('temp.txt')
    >>> file(fname, 'wt').write("1 2\\r3 4")
    >>> convert_newlines(fname)
    (2, None)
    >>> file(fname).read()
    '1 2\\n3 4\\n'
    """
    fd, temp_name = tempfile.mkstemp()
    fp = os.fdopen( fd, "wt" )
    i = None
    for i, line in enumerate( file( fname, "U" ) ):
        fp.write( "%s\n" % line.rstrip( "\r\n" ) )
    fp.close()
    if i is None:
        i = 0
    else:
        i += 1
    if in_place:
        shutil.move( temp_name, fname )
        # Return the number of lines in the file.
        return ( i, None )
    else:
        return ( i, temp_name )
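# Note: convert_newlines(), sep2tabs() and convert_newlines_sep2tabs() share the
# ( line_count, temp_name ) return shape; temp_name is None when in_place=True,
# because the temporary file has already replaced the original.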
def sep2tabs( fname, in_place=True, patt="\\s+" ):
    """
    Transforms in place a 'sep' separated file to a tab separated one

    >>> fname = get_test_fname('temp.txt')
    >>> file(fname, 'wt').write("1 2\\n3 4\\n")
    >>> sep2tabs(fname)
    (2, None)
    >>> file(fname).read()
    '1\\t2\\n3\\t4\\n'
    """
    regexp = re.compile( patt )
    fd, temp_name = tempfile.mkstemp()
    fp = os.fdopen( fd, "wt" )
    i = None
    for i, line in enumerate( file( fname ) ):
        line = line.rstrip( '\r\n' )
        elems = regexp.split( line )
        fp.write( "%s\n" % '\t'.join( elems ) )
    fp.close()
    if i is None:
        i = 0
    else:
        i += 1
    if in_place:
        shutil.move( temp_name, fname )
        # Return the number of lines in the file.
        return ( i, None )
    else:
        return ( i, temp_name )
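# Because patt defaults to the regex \s+, runs of mixed spaces and tabs collapse
# to a single tab.  Illustrative sketch:
#
#   >>> file( get_test_fname( 'temp.txt' ), 'wt' ).write( "1  2\t 3\n" )
#   >>> sep2tabs( get_test_fname( 'temp.txt' ) )
#   (1, None)
#   >>> file( get_test_fname( 'temp.txt' ) ).read()
#   '1\t2\t3\n'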
def convert_newlines_sep2tabs( fname, in_place=True, patt="\\s+" ):
    """
    Combines convert_newlines() and sep2tabs() above
    so that files do not need to be read twice.

    >>> fname = get_test_fname('temp.txt')
    >>> file(fname, 'wt').write("1 2\\r3 4")
    >>> convert_newlines_sep2tabs(fname)
    (2, None)
    >>> file(fname).read()
    '1\\t2\\n3\\t4\\n'
    """
    regexp = re.compile( patt )
    fd, temp_name = tempfile.mkstemp()
    fp = os.fdopen( fd, "wt" )
    i = -1  # guard against an empty input file leaving 'i' unbound
    for i, line in enumerate( file( fname, "U" ) ):
        line = line.rstrip( '\r\n' )
        elems = regexp.split( line )
        fp.write( "%s\n" % '\t'.join( elems ) )
    fp.close()
    if in_place:
        shutil.move( temp_name, fname )
        # Return the number of lines in the file.
        return ( i + 1, None )
    else:
        return ( i + 1, temp_name )
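# Equivalent to calling convert_newlines() followed by sep2tabs(), but in a single
# pass; the doctest above shows a '\r'-delimited file coming out both
# POSIX-terminated and tab-separated.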
def get_headers( fname, sep, count=60, is_multi_byte=False ):
    """
    Returns a list with the first 'count' lines split by 'sep'

    >>> fname = get_test_fname('complete.bed')
    >>> get_headers(fname,'\\t')
    [['chr7', '127475281', '127491632', 'NM_000230', '0', '+', '127486022', '127488767', '0', '3', '29,172,3225,', '0,10713,13126,'], ['chr7', '127486011', '127488900', 'D49487', '0', '+', '127486022', '127488767', '0', '2', '155,490,', '0,2399']]
    """
    headers = []
    for idx, line in enumerate( file( fname ) ):
        line = line.rstrip( '\n\r' )
        if is_multi_byte:
            # TODO: fix this - sep is never found in line
            line = unicode( line, 'utf-8' )
            sep = sep.encode( 'utf-8' )
        headers.append( line.split( sep ) )
        if idx == count:
            break
    return headers
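# Passing sep=None gives str.split() semantics (split on arbitrary whitespace),
# which is how guess_ext() below uses get_headers() for its binary-character scan.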
def is_column_based( fname, sep='\t', skip=0, is_multi_byte=False ):
    """
    Checks whether the file is column based with respect to a separator
    (defaults to tab separator).

    >>> fname = get_test_fname('test.gff')
    >>> is_column_based(fname)
    True
    >>> fname = get_test_fname('test_tab.bed')
    >>> is_column_based(fname)
    True
    >>> is_column_based(fname, sep=' ')
    False
    >>> fname = get_test_fname('test_space.txt')
    >>> is_column_based(fname)
    False
    >>> is_column_based(fname, sep=' ')
    True
    >>> fname = get_test_fname('test_ensembl.tab')
    >>> is_column_based(fname)
    True
    >>> fname = get_test_fname('test_tab1.tabular')
    >>> is_column_based(fname, sep=' ', skip=0)
    False
    >>> fname = get_test_fname('test_tab1.tabular')
    >>> is_column_based(fname)
    True
    """
    headers = get_headers( fname, sep, is_multi_byte=is_multi_byte )
    count = 0
    if not headers:
        return False
    for hdr in headers[skip:]:
        if hdr and hdr[0] and not hdr[0].startswith('#'):
            if len(hdr) > 1:
                count = len(hdr)
            break
    if count < 2:
        return False
    for hdr in headers[skip:]:
        if hdr and hdr[0] and not hdr[0].startswith('#'):
            if len(hdr) != count:
                return False
    return True
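# In other words: ignoring comment lines and the first 'skip' rows, the first data
# row fixes the column count (which must be at least 2), and every later
# non-comment row must have exactly that many columns.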
def guess_ext( fname, sniff_order=None, is_multi_byte=False ):
    """
    Returns an extension that can be used in the datatype factory to
    generate a dataset for the file 'fname'.

    >>> fname = get_test_fname('megablast_xml_parser_test1.blastxml')
    >>> guess_ext(fname)
    'xml'
    >>> fname = get_test_fname('interval.interval')
    >>> guess_ext(fname)
    'interval'
    >>> fname = get_test_fname('interval1.bed')
    >>> guess_ext(fname)
    'bed'
    >>> fname = get_test_fname('test_tab.bed')
    >>> guess_ext(fname)
    'bed'
    >>> fname = get_test_fname('sequence.maf')
    >>> guess_ext(fname)
    'maf'
    >>> fname = get_test_fname('sequence.fasta')
    >>> guess_ext(fname)
    'fasta'
    >>> fname = get_test_fname('file.html')
    >>> guess_ext(fname)
    'html'
    >>> fname = get_test_fname('test.gtf')
    >>> guess_ext(fname)
    'gtf'
    >>> fname = get_test_fname('test.gff')
    >>> guess_ext(fname)
    'gff'
    >>> fname = get_test_fname('gff_version_3.gff')
    >>> guess_ext(fname)
    'gff3'
    >>> fname = get_test_fname('temp.txt')
    >>> file(fname, 'wt').write("a\\t2\\nc\\t1\\nd\\t0")
    >>> guess_ext(fname)
    'tabular'
    >>> fname = get_test_fname('temp.txt')
    >>> file(fname, 'wt').write("a 1 2 x\\nb 3 4 y\\nc 5 6 z")
    >>> guess_ext(fname)
    'txt'
    >>> fname = get_test_fname('test_tab1.tabular')
    >>> guess_ext(fname)
    'tabular'
    >>> fname = get_test_fname('alignment.lav')
    >>> guess_ext(fname)
    'lav'
    >>> fname = get_test_fname('1.sff')
    >>> guess_ext(fname)
    'sff'
    >>> fname = get_test_fname('1.bam')
    >>> guess_ext(fname)
    'bam'
    >>> fname = get_test_fname('3unsorted.bam')
    >>> guess_ext(fname)
    'bam'
    """
    if sniff_order is None:
        datatypes_registry = registry.Registry()
        datatypes_registry.load_datatypes()
        sniff_order = datatypes_registry.sniff_order
    for datatype in sniff_order:
        # Some classes may not have a sniff function, which is ok.  In fact, the
        # Tabular and Text classes are 2 examples of classes that should never have
        # a sniff function.  Since these classes are default classes, they contain
        # few rules to filter out data of other formats, so they should be tried
        # only after all other datatypes in sniff_order have failed to match.
        try:
            if datatype.sniff( fname ):
                return datatype.file_ext
        except:
            pass
    headers = get_headers( fname, None )
    is_binary = False
    if is_multi_byte:
        is_binary = False
    else:
        for hdr in headers:
            for char in hdr:
                # Old behavior had 'char' possibly having length > 1;
                # need to determine when/if this occurs.
                is_binary = util.is_binary( char )
                if is_binary:
                    break
            if is_binary:
                break
    if is_binary:
        return 'data'  # default binary data type file extension
    if is_column_based( fname, '\t', 1, is_multi_byte=is_multi_byte ):
        return 'tabular'  # default tabular data type file extension
    return 'txt'  # default text data type file extension
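# Typical call, as made from handle_uploaded_dataset_file() below (a sketch;
# assumes an already-loaded datatypes registry):
#
#   ext = guess_ext( '/path/to/upload.dat', sniff_order=datatypes_registry.sniff_order )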
def handle_compressed_file( filename, datatypes_registry, ext='auto' ):
    CHUNK_SIZE = 2 ** 20  # 1Mb
    is_compressed = False
    compressed_type = None
    keep_compressed = False
    is_valid = False
    for compressed_type, check_compressed_function in COMPRESSION_CHECK_FUNCTIONS:
        is_compressed = check_compressed_function( filename )
        if is_compressed:
            break  # found compression type
    if is_compressed:
        if ext in AUTO_DETECT_EXTENSIONS:
            check_exts = COMPRESSION_DATATYPES[ compressed_type ]
        elif ext in COMPRESSED_EXTENSIONS:
            check_exts = [ ext ]
        else:
            check_exts = []
        for compressed_ext in check_exts:
            compressed_datatype = datatypes_registry.get_datatype_by_extension( compressed_ext )
            if compressed_datatype.sniff( filename ):
                ext = compressed_ext
                keep_compressed = True
                is_valid = True
                break
    if not is_compressed:
        is_valid = True
    elif not keep_compressed:
        is_valid = True
        fd, uncompressed = tempfile.mkstemp()
        compressed_file = DECOMPRESSION_FUNCTIONS[ compressed_type ]( filename )
        while True:
            try:
                chunk = compressed_file.read( CHUNK_SIZE )
            except IOError, e:
                os.close( fd )
                os.remove( uncompressed )
                compressed_file.close()
                raise IOError, 'Problem uncompressing %s data, please try retrieving the data uncompressed: %s' % ( compressed_type, e )
            if not chunk:
                break
            os.write( fd, chunk )
        os.close( fd )
        compressed_file.close()
        # Replace the compressed file with the uncompressed file
        shutil.move( uncompressed, filename )
    return is_valid, ext
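# Return contract: ( is_valid, ext ).  If a compressed datatype (e.g. 'bam' for
# gzip) sniffs successfully, the file is kept compressed and ext updated;
# otherwise the payload is decompressed and the original file replaced in place.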
def handle_uploaded_dataset_file( filename, datatypes_registry, ext='auto', is_multi_byte=False ):
    is_valid, ext = handle_compressed_file( filename, datatypes_registry, ext=ext )
    if not is_valid:
        raise InappropriateDatasetContentError, 'The compressed uploaded file contains inappropriate content.'
    if ext in AUTO_DETECT_EXTENSIONS:
        ext = guess_ext( filename, sniff_order=datatypes_registry.sniff_order, is_multi_byte=is_multi_byte )
    if check_binary( filename ):
        if not Binary.is_ext_unsniffable( ext ) and not datatypes_registry.get_datatype_by_extension( ext ).sniff( filename ):
            raise InappropriateDatasetContentError, 'The binary uploaded file contains inappropriate content.'
    elif check_html( filename ):
        raise InappropriateDatasetContentError, 'The uploaded file contains inappropriate HTML content.'
    return ext
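# Upload-path sketch (illustrative; datatypes_registry is assumed to be an
# initialized galaxy.datatypes.registry.Registry):
#
#   ext = handle_uploaded_dataset_file( dataset.path, datatypes_registry, ext='auto' )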
AUTO_DETECT_EXTENSIONS = [ 'auto' ]  # should 'data' also cause auto detect?
DECOMPRESSION_FUNCTIONS = dict( gzip=gzip.GzipFile )
COMPRESSION_CHECK_FUNCTIONS = [ ( 'gzip', is_gzip ) ]
COMPRESSION_DATATYPES = dict( gzip=[ 'bam' ] )
COMPRESSED_EXTENSIONS = []
for exts in COMPRESSION_DATATYPES.itervalues():
    COMPRESSED_EXTENSIONS.extend( exts )


class InappropriateDatasetContentError( Exception ):
    pass
if __name__ == '__main__':
    import doctest
    doctest.testmod( sys.modules[ __name__ ] )