
/tools/data_source/upload.py

https://bitbucket.org/cistrome/cistrome-harvard/
  1#!/usr/bin/env python
  2#Processes uploads from the user.
  3
  4# WARNING: Changes in this tool (particularly as related to parsing) may need
  5# to be reflected in galaxy.web.controllers.tool_runner and galaxy.tools
  6
  7import urllib, sys, os, gzip, tempfile, shutil, re, zipfile, codecs, binascii
  8from galaxy import eggs
  9# need to import model before sniff to resolve a circular import dependency
 10import galaxy.model
 11from galaxy.datatypes.checkers import *
 12from galaxy.datatypes import sniff
 13from galaxy.datatypes.binary import *
 14from galaxy.datatypes.images import Pdf
 15from galaxy.datatypes.registry import Registry
 16from galaxy import util
 17from galaxy.datatypes.util.image_util import *
 18from galaxy.util.json import *
 19
 20try:
 21    import Image as PIL
 22except ImportError:
 23    try:
 24        from PIL import Image as PIL
 25    except:
 26        PIL = None
 27
 28try:
 29    import bz2
 30except:
 31    bz2 = None
 32
 33assert sys.version_info[:2] >= ( 2, 4 )
 34
 35def stop_err( msg, ret=1 ):
 36    sys.stderr.write( msg )
 37    sys.exit( ret )
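   # Report an upload failure: write an error record for this dataset to the
   # galaxy.json stream and remove the temporary upload (server-side uploads
   # via server_dir/path_paste are never deleted).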
 38def file_err( msg, dataset, json_file ):
 39    json_file.write( to_json_string( dict( type = 'dataset',
 40                                           ext = 'data',
 41                                           dataset_id = dataset.dataset_id,
 42                                           stderr = msg ) ) + "\n" )
 43    # never remove a server-side upload
 44    if dataset.type in ( 'server_dir', 'path_paste' ):
 45        return
 46    try:
 47        os.remove( dataset.path )
 48    except:
 49        pass
 50def safe_dict(d):
 51    """
 52    Recursively clone json structure with UTF-8 dictionary keys
 53    http://mellowmachines.com/blog/2009/06/exploding-dictionary-with-unicode-keys-as-python-arguments/
 54    """
 55    if isinstance(d, dict):
 56        return dict([(k.encode('utf-8'), safe_dict(v)) for k,v in d.iteritems()])
 57    elif isinstance(d, list):
 58        return [safe_dict(x) for x in d]
 59    else:
 60        return d
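   # Scan the first 100 lines of a file (or an already-read chunk) for HTML
   # markup (links, iframes, framesets, meta or script tags) that is not
   # allowed in uploaded datasets.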
 61def check_html( temp_name, chunk=None ):
 62    if chunk is None:
 63        temp = open(temp_name, "U")
 64    else:
 65        temp = chunk
 66    regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I )
 67    regexp2 = re.compile( "<IFRAME[^>]*>", re.I )
 68    regexp3 = re.compile( "<FRAMESET[^>]*>", re.I )
 69    regexp4 = re.compile( "<META[^>]*>", re.I )
 70    regexp5 = re.compile( "<SCRIPT[^>]*>", re.I )
 71    lineno = 0
 72    for line in temp:
 73        lineno += 1
 74        matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line ) or regexp5.search( line )
 75        if matches:
 76            if chunk is None:
 77                temp.close()
 78            return True
 79        if lineno > 100:
 80            break
 81    if chunk is None:
 82        temp.close()
 83    return False
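   # Heuristic binary check: treat the file as binary if any of the first ~100
   # characters has an ordinal value above 128.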
 84def check_binary( temp_name ):
 85    is_binary = False
 86    temp = open( temp_name, "U" )
 87    chars_read = 0
 88    for chars in temp:
 89        for char in chars:
 90            chars_read += 1
 91            if ord( char ) > 128:
 92                is_binary = True
 93                break
 94            if chars_read > 100:
 95                break
 96        if chars_read > 100:
 97            break
 98    temp.close()
 99    return is_binary
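   # Thin wrappers around the datatype sniffers; each returns True if the file
   # matches the corresponding binary format.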
100def check_bam( file_path ):
101    return Bam().sniff( file_path )
102def check_sff( file_path ):
103    return Sff().sniff( file_path )
104def check_pdf( file_path ):
105    return Pdf().sniff( file_path )
106def check_bigwig( file_path ):
107    return BigWig().sniff( file_path )
108def check_bigbed( file_path ):
109    return BigBed().sniff( file_path )
110def check_cel( filename ):
111    return Cel().sniff( filename )
112def check_gzip( temp_name ):
113    # This method returns a tuple of booleans representing ( is_gzipped, is_valid )
114    # Make sure we have a gzipped file
115    try:
116        temp = open( temp_name, "U" )
117        magic_check = temp.read( 2 )
118        temp.close()
119        if magic_check != util.gzip_magic:
120            return ( False, False )
121    except:
122        return ( False, False )
123    # We support some binary data types, so check if the compressed binary file is valid
124    # If the file is Bam, it should already have been detected as such, so we'll just check
125    # for sff format.
126    try:
127        header = gzip.open( temp_name ).read(4)
128        if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ):
129            return ( True, True )
130    except:
131        return( False, False )
132    CHUNK_SIZE = 2**15 # 32Kb
133    gzipped_file = gzip.GzipFile( temp_name, mode='rb' )
134    chunk = gzipped_file.read( CHUNK_SIZE )
135    gzipped_file.close()
136    # See if we have a compressed HTML file
137    if check_html( temp_name, chunk=chunk ):
138        return ( True, False )
139    return ( True, True )
140def check_bz2( temp_name ):
141    try:
142        temp = open( temp_name, "U" )
143        magic_check = temp.read( 3 )
144        temp.close()
145        if magic_check != util.bz2_magic:
146            return ( False, False )
147    except:
148        return( False, False )
149    CHUNK_SIZE = 2**15 # 32Kb
150    bzipped_file = bz2.BZ2File( temp_name, mode='rb' )
151    chunk = bzipped_file.read( CHUNK_SIZE )
152    bzipped_file.close()
153    # See if we have a compressed HTML file
154    if check_html( temp_name, chunk=chunk ):
155        return ( True, False )
156    return ( True, True )
157def check_zip( temp_name ):
158    if zipfile.is_zipfile( temp_name ):
159        return True
160    return False
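   # Illustrative example (hypothetical archive): a zip containing pheno.txt,
   # sample1.cel and sample2.cel would return (True, True, True, True, True, 'cel').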
161def check_zip_for_expression( temp_name ):
162    # Return: (is_zip, known_ext, exactly_one_pheno, gt_one, homogeneous, ext)
163    if not zipfile.is_zipfile( temp_name ):
164        return (False, False, False, False, False, None)
165    zip_file = zipfile.ZipFile( temp_name, "r" )
166    # Make sure the archive consists of valid files.  The current rules are:
167    # 1. The file type in the zip is homogeneous, except that there is exactly one .txt pheno file
168    # 2. The rest of the files must be either .cel or .xys
169    # 3. There must be at least two .cel or .xys
170
171    hasPheno = False
172    count = 0
173    test_ext = None
174    for name in zip_file.namelist():
175        fileBaseName = os.path.basename(name)
176        if(fileBaseName=="" or fileBaseName.startswith(".") or name.startswith("__MACOSX")):
177            # ignore folder names, hidden files on *nix, and extra resource forks added by Mac OS X ZIP software.
178            continue
179        # Modified to support folders inside the zip file
180        #ext = name.split(".")[1].strip().lower()
181        ext = os.path.splitext( name )[1].strip().lower().replace(".","")
182        count += 1
183
184        if (not (ext == "txt" or ext == "cel" or ext == "xys")):
185            #return (True, False, False, False, False, ext)
186            continue
187
188        if (ext == "txt"):
189            if (hasPheno):
190                return (True, True, False, False, False, None)
191            else:
192                hasPheno = True
193        elif (test_ext is None):
194            test_ext = ext
195        elif (ext != test_ext):
196            return (True, True, True, True, False, None)
197            
198    zip_file.close()
199    return ( True, True, hasPheno, (count >= 3), True, test_ext )
200
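   # Each output spec on the command line has the form
   # <dataset_id>:<files_path>:<output_path>; parse_outputs maps the dataset id
   # to an ( output_path, files_path ) tuple.  For example (illustrative paths),
   # "7:/tmp/job7_files:/galaxy/files/dataset_7.dat" becomes
   # { 7: ( '/galaxy/files/dataset_7.dat', '/tmp/job7_files' ) }.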
201def parse_outputs( args ):
202    rval = {}
203    for arg in args:
204        id, files_path, path = arg.split( ':', 2 )
205        rval[int( id )] = ( path, files_path )
206    return rval
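   # Process a single (non-composite) upload: sniff or validate the content,
   # decompress gzip/bz2/zip data when it is being copied into Galaxy, convert
   # line endings if requested, then emit a JSON record describing the dataset.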
207def add_file( dataset, registry, json_file, output_path ):
208    data_type = None
209    line_count = None
210    converted_path = None
211    stdout = None
212    link_data_only = dataset.get( 'link_data_only', 'copy_files' )
213    in_place = dataset.get( 'in_place', True )
214
215    try:
216        ext = dataset.file_type
217    except AttributeError:
218        file_err( 'Unable to process uploaded file, missing file_type parameter.', dataset, json_file )
219        return
220
221    if dataset.type == 'url':
222        try:
223            page = urllib.urlopen( dataset.path ) #page will be .close()ed by sniff methods
224            temp_name, dataset.is_multi_byte = sniff.stream_to_file( page, prefix='url_paste', source_encoding=util.get_charset_from_http_headers( page.headers ) )
225        except Exception, e:
226            file_err( 'Unable to fetch %s\n%s' % ( dataset.path, str( e ) ), dataset, json_file )
227            return
228        dataset.path = temp_name
229    # See if we have an empty file
230    if not os.path.exists( dataset.path ):
231        file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
232        return
233    if not os.path.getsize( dataset.path ) > 0:
234        file_err( 'The uploaded file is empty', dataset, json_file )
235        return
236    if not dataset.type == 'url':
237        # Already set is_multi_byte above if type == 'url'
238        try:
239            dataset.is_multi_byte = util.is_multi_byte( codecs.open( dataset.path, 'r', 'utf-8' ).read( 100 ) )
240        except UnicodeDecodeError, e:
241            dataset.is_multi_byte = False
242    # Is dataset an image?
243    image = check_image( dataset.path )
244    if image:
245        if not PIL:
246            image = None
247        # get_image_ext() returns None if not a supported image type
248        ext = get_image_ext( dataset.path, image )
249        data_type = ext
250    # Is dataset content multi-byte?
251    elif dataset.is_multi_byte:
252        data_type = 'multi-byte char'
253        ext = sniff.guess_ext( dataset.path, is_multi_byte=True )
254    # Is dataset content supported sniffable binary?
255    elif check_bam( dataset.path ):
256        ext = 'bam'
257        data_type = 'bam'
258    elif check_sff( dataset.path ):
259        ext = 'sff'
260        data_type = 'sff'
261    elif check_pdf( dataset.path ):
262        ext = 'pdf'
263        data_type = 'pdf'
264    elif check_bigwig( dataset.path ):
265        ext = 'bigwig'
266        data_type = 'bigwig'
267    elif check_bigbed( dataset.path ):
268        ext = 'bigbed'
269        data_type = 'bigbed'
270    elif check_cel( dataset.name ):
271        ext = 'cel'
272        data_type = 'cel'
273    else:
274        type_info = Binary.is_sniffable_binary( dataset.path )
275        if type_info:
276            data_type = type_info[0]
277            ext = type_info[1]
278    if not data_type:
279        # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
280        is_gzipped, is_valid = check_gzip( dataset.path )
281        if is_gzipped and not is_valid:
282            file_err( 'The gzipped uploaded file contains inappropriate content', dataset, json_file )
283            return
284        elif is_gzipped and is_valid:
285            if link_data_only == 'copy_files':
286                # We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
287                CHUNK_SIZE = 2**20 # 1Mb
288                fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_gunzip_' % dataset.dataset_id, dir=os.path.dirname( output_path ), text=False )
289                gzipped_file = gzip.GzipFile( dataset.path, 'rb' )
290                while 1:
291                    try:
292                        chunk = gzipped_file.read( CHUNK_SIZE )
293                    except IOError:
294                        os.close( fd )
295                        os.remove( uncompressed )
296                        file_err( 'Problem decompressing gzipped data', dataset, json_file )
297                        return
298                    if not chunk:
299                        break
300                    os.write( fd, chunk )
301                os.close( fd )
302                gzipped_file.close()
303                # Replace the gzipped file with the decompressed file if it's safe to do so
304                if dataset.type in ( 'server_dir', 'path_paste' ) or not in_place:
305                    dataset.path = uncompressed
306                else:
307                    shutil.move( uncompressed, dataset.path )
308                os.chmod(dataset.path, 0644)
309            dataset.name = dataset.name.rstrip( '.gz' )
310            data_type = 'gzip'
311        if not data_type and bz2 is not None:
312            # See if we have a bz2 file, much like gzip
313            is_bzipped, is_valid = check_bz2( dataset.path )
314            if is_bzipped and not is_valid:
315                file_err( 'The bz2-compressed uploaded file contains inappropriate content', dataset, json_file )
316                return
317            elif is_bzipped and is_valid:
318                if link_data_only == 'copy_files':
319                    # We need to uncompress the temp_name file
320                    CHUNK_SIZE = 2**20 # 1Mb
321                    fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_bunzip2_' % dataset.dataset_id, dir=os.path.dirname( output_path ), text=False )
322                    bzipped_file = bz2.BZ2File( dataset.path, 'rb' )
323                    while 1:
324                        try:
325                            chunk = bzipped_file.read( CHUNK_SIZE )
326                        except IOError:
327                            os.close( fd )
328                            os.remove( uncompressed )
329                            file_err( 'Problem decompressing bz2 compressed data', dataset, json_file )
330                            return
331                        if not chunk:
332                            break
333                        os.write( fd, chunk )
334                    os.close( fd )
335                    bzipped_file.close()
336                    # Replace the bzipped file with the decompressed file if it's safe to do so
337                    if dataset.type in ( 'server_dir', 'path_paste' ) or not in_place:
338                        dataset.path = uncompressed
339                    else:
340                        shutil.move( uncompressed, dataset.path )
341                    os.chmod(dataset.path, 0644)
342                dataset.name = dataset.name.rstrip( '.bz2' )
343                data_type = 'bz2'
344        if not data_type:
345            # See if we have a zip archive for expression data
346            is_zipped_for_expression, known_ext, one_pheno, gt_one, homogeneous, test_ext = check_zip_for_expression( dataset.path )
347            if (not is_zipped_for_expression):
348                pass
349            else:
350                if (not one_pheno):
351                    file_err("There must be exactly one .txt pheno file in the zip at %s." % one_pheno, dataset, json_file)
352                if (not gt_one):
353                    file_err("There must be more than one CEL or XYS file in the zip.", dataset, json_file)
354                if (not homogeneous):
355                    file_err("Except the .txt pheno file, other files must be all CEL or XYS.", dataset, json_file)
356
357                data_type = 'zip_for_expression'
358                if (test_ext == 'cel'):
359                    ext = 'cel.zip'
360                    file_type = 'cel.zip'
361                else:
362                    ext = 'xys.zip'
363                    file_type = 'xys.zip'
364
365        if not data_type:
366            # See if we have a zip archive
367            is_zipped = check_zip( dataset.path )
368            if is_zipped:
369                if link_data_only == 'copy_files':
370                    CHUNK_SIZE = 2**20 # 1Mb
371                    uncompressed = None
372                    uncompressed_name = None
373                    unzipped = False
374                    z = zipfile.ZipFile( dataset.path )
375                    for name in z.namelist():
376                        if name.endswith('/'):
377                            continue
378                        if unzipped:
379                            stdout = 'ZIP file contained more than one file, only the first file was added to Galaxy.'
380                            break
381                        fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_zip_' % dataset.dataset_id, dir=os.path.dirname( output_path ), text=False )
382                        if sys.version_info[:2] >= ( 2, 6 ):
383                            zipped_file = z.open( name )
384                            while 1:
385                                try:
386                                    chunk = zipped_file.read( CHUNK_SIZE )
387                                except IOError:
388                                    os.close( fd )
389                                    os.remove( uncompressed )
390                                    file_err( 'Problem decompressing zipped data', dataset, json_file )
391                                    return
392                                if not chunk:
393                                    break
394                                os.write( fd, chunk )
395                            os.close( fd )
396                            zipped_file.close()
397                            uncompressed_name = name
398                            unzipped = True
399                        else:
400                            # python < 2.6 doesn't have a way to read members in chunks(!)
401                            try:
402                                outfile = open( uncompressed, 'wb' )
403                                outfile.write( z.read( name ) )
404                                outfile.close()
405                                uncompressed_name = name
406                                unzipped = True
407                            except IOError:
408                                os.close( fd )
409                                os.remove( uncompressed )
410                                file_err( 'Problem decompressing zipped data', dataset, json_file )
411                                return
412                    z.close()
413                    # Replace the zipped file with the decompressed file if it's safe to do so
414                    if uncompressed is not None:
415                        if dataset.type in ( 'server_dir', 'path_paste' ) or not in_place:
416                            dataset.path = uncompressed
417                        else:
418                            shutil.move( uncompressed, dataset.path )
419                        os.chmod(dataset.path, 0644)
420                        dataset.name = uncompressed_name
421                data_type = 'zip'
422        if not data_type:
423            if check_binary( dataset.path ):
424                # We have a binary dataset, but it is not one of the sniffable binary types handled above
425                data_type = 'binary'
426                #binary_ok = False
427                parts = dataset.name.split( "." )
428                if len( parts ) > 1:
429                    ext = parts[-1].strip().lower()
430                    if not Binary.is_ext_unsniffable(ext):
431                        file_err( 'The uploaded binary file contains inappropriate content', dataset, json_file )
432                        return
433                    elif Binary.is_ext_unsniffable(ext) and dataset.file_type != ext:
434                        err_msg = "You must manually set the 'File Format' to '%s' when uploading %s files." % ( ext.capitalize(), ext )
435                        file_err( err_msg, dataset, json_file )
436                        return
437        if not data_type:
438            # We must have a text file
439            if check_html( dataset.path ):
440                file_err( 'The uploaded file contains inappropriate HTML content', dataset, json_file )
441                return
442        if data_type != 'binary' and data_type != 'zip_for_expression':
443            if link_data_only == 'copy_files':
444                if dataset.type in ( 'server_dir', 'path_paste' ) and data_type not in [ 'gzip', 'bz2', 'zip' ]:
445                    in_place = False
446                # Convert universal line endings to Posix line endings, but allow the user to turn it off,
447                # so that it becomes possible to upload gzip, bz2 or zip files with binary data without
448                # corrupting the content of those files.
449                if dataset.to_posix_lines:
450                    if dataset.space_to_tab:
451                        line_count, converted_path = sniff.convert_newlines_sep2tabs( dataset.path, in_place=in_place )
452                    else:
453                        line_count, converted_path = sniff.convert_newlines( dataset.path, in_place=in_place )
454            if dataset.file_type == 'auto':
455                ext = sniff.guess_ext( dataset.path, registry.sniff_order )
456            else:
457                ext = dataset.file_type
458            data_type = ext
459    # Save job info for the framework
460    if ext == 'auto' and dataset.ext:
461        ext = dataset.ext
462    if ext == 'auto':
463        ext = 'data'
464    datatype = registry.get_datatype_by_extension( ext )
465    if dataset.type in ( 'server_dir', 'path_paste' ) and link_data_only == 'link_to_files':
466        # Never alter a file that will not be copied to Galaxy's local file store.
467        if datatype.dataset_content_needs_grooming( dataset.path ):
468            err_msg = 'The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be ' + \
469                '<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed.'
470            file_err( err_msg, dataset, json_file )
471            return
472    if link_data_only == 'copy_files' and dataset.type in ( 'server_dir', 'path_paste' ) and data_type not in [ 'gzip', 'bz2', 'zip' ]:
473        # Move the dataset to its "real" path
474        if converted_path is not None:
475            shutil.copy( converted_path, output_path )
476            try:
477                os.remove( converted_path )
478            except:
479                pass
480        else:
481            # This should not happen, but it's here just in case
482            shutil.copy( dataset.path, output_path )
483    elif link_data_only == 'copy_files':
484        shutil.move( dataset.path, output_path )
485    # Write the job info
486    stdout = stdout or 'uploaded %s file' % data_type
487    info = dict( type = 'dataset',
488                 dataset_id = dataset.dataset_id,
489                 ext = ext,
490                 stdout = stdout,
491                 name = dataset.name,
492                 line_count = line_count )
493    if dataset.get('uuid', None) is not None:
494        info['uuid'] = dataset.get('uuid')
495    json_file.write( to_json_string( info ) + "\n" )
496
497    if link_data_only == 'copy_files' and datatype.dataset_content_needs_grooming( output_path ):
498        # Groom the dataset content if necessary
499        datatype.groom_dataset_content( output_path )
500
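   # Process a composite upload: stage each declared composite file (fetching
   # URLs and converting line endings for text parts), then move the primary
   # file to the output path and emit a JSON record.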
501def add_composite_file( dataset, registry, json_file, output_path, files_path ):
502        if dataset.composite_files:
503            os.mkdir( files_path )
504            for name, value in dataset.composite_files.iteritems():
505                value = util.bunch.Bunch( **value )
506                if dataset.composite_file_paths[ value.name ] is None and not value.optional:
507                    file_err( 'A required composite data file was not provided (%s)' % name, dataset, json_file )
508                    break
509                elif dataset.composite_file_paths[value.name] is not None:
510                    dp = dataset.composite_file_paths[value.name][ 'path' ]
511                    isurl = dp.find('://') != -1 # todo fixme
512                    if isurl:
513                       try:
514                           temp_name, dataset.is_multi_byte = sniff.stream_to_file( urllib.urlopen( dp ), prefix='url_paste' )
515                       except Exception, e:
516                           file_err( 'Unable to fetch %s\n%s' % ( dp, str( e ) ), dataset, json_file )
517                           return
518                       dataset.path = temp_name
519                       dp = temp_name
520                    if not value.is_binary:
521                        if dataset.composite_file_paths[ value.name ].get( 'space_to_tab', value.space_to_tab ):
522                            sniff.convert_newlines_sep2tabs( dp )
523                        else:
524                            sniff.convert_newlines( dp )
525                    shutil.move( dp, os.path.join( files_path, name ) )
526        # Move the dataset to its "real" path
527        shutil.move( dataset.primary_file, output_path )
528        # Write the job info
529        info = dict( type = 'dataset',
530                     dataset_id = dataset.dataset_id,
531                     stdout = 'uploaded %s file' % dataset.file_type )
532        json_file.write( to_json_string( info ) + "\n" )
533
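   # Entry point.  Example invocation (illustrative paths):
   #   python upload.py /path/to/galaxy datatypes_conf.xml params.json \
   #       1:/tmp/files_1:/galaxy/files/dataset_1.dat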
534def __main__():
535
536    if len( sys.argv ) < 4:
537        print >>sys.stderr, 'usage: upload.py <root> <datatypes_conf> <json paramfile> <output spec> ...'
538        sys.exit( 1 )
539
540    output_paths = parse_outputs( sys.argv[4:] )
541    json_file = open( 'galaxy.json', 'w' )
542
543    registry = Registry()
544    registry.load_datatypes( root_dir=sys.argv[1], config=sys.argv[2] )
545
546    for line in open( sys.argv[3], 'r' ):
547        dataset = from_json_string( line )
548        dataset = util.bunch.Bunch( **safe_dict( dataset ) )
549        try:
550            output_path = output_paths[int( dataset.dataset_id )][0]
551        except:
552            print >>sys.stderr, 'Output path for dataset %s not found on command line' % dataset.dataset_id
553            sys.exit( 1 )
554        if dataset.type == 'composite':
555            files_path = output_paths[int( dataset.dataset_id )][1]
556            add_composite_file( dataset, registry, json_file, output_path, files_path )
557        else:
558            add_file( dataset, registry, json_file, output_path )
559
560    # clean up paramfile
561    # TODO: this will not work when running as the actual user unless the
562    # parent directory is writable by the user.
563    try:
564        os.remove( sys.argv[3] )
565    except:
566        pass
567
568if __name__ == '__main__':
569    __main__()