PageRenderTime 32ms CodeModel.GetById 2ms app.highlight 25ms RepoModel.GetById 1ms app.codeStats 1ms

/tools/expression/upload.py

https://bitbucket.org/cistrome/cistrome-harvard/
Python | 363 lines | 335 code | 6 blank | 22 comment | 36 complexity | 93825743bfbd1a2179749ffdd7a4cd16 MD5 | raw file
  1#!/usr/bin/env python
  2#Processes uploads from the user.
  3
  4# WARNING: Changes in this tool (particularly as related to parsing) may need
  5# to be reflected in galaxy.web.controllers.tool_runner and galaxy.tools
  6
  7import urllib, sys, os, gzip, tempfile, shutil, re, gzip, zipfile, codecs, binascii
  8from galaxy import eggs
  9# need to import model before sniff to resolve a circular import dependency
 10import galaxy.model
 11from galaxy.datatypes import sniff
 12from galaxy.datatypes.binary import *
 13from galaxy.datatypes.registry import Registry
 14from galaxy import util
 15from galaxy.util.json import *
 16
 17assert sys.version_info[:2] >= ( 2, 4 )
 18
 19def stop_err( msg, ret=1 ):
 20    sys.stderr.write( msg )
 21    sys.exit( ret )
 22def file_err( msg, dataset, json_file ):
 23    json_file.write( to_json_string( dict( type = 'dataset',
 24                                           ext = 'data',
 25                                           dataset_id = dataset.dataset_id,
 26                                           stderr = msg ) ) + "\n" )
 27    try:
 28        os.remove( dataset.path )
 29    except:
 30        pass
 31def safe_dict(d):
 32    """
 33    Recursively clone json structure with UTF-8 dictionary keys
 34    http://mellowmachines.com/blog/2009/06/exploding-dictionary-with-unicode-keys-as-python-arguments/
 35    """
 36    if isinstance(d, dict):
 37        return dict([(k.encode('utf-8'), safe_dict(v)) for k,v in d.iteritems()])
 38    elif isinstance(d, list):
 39        return [safe_dict(x) for x in d]
 40    else:
 41        return d
 42def check_html( temp_name, chunk=None ):
 43    if chunk is None:
 44        temp = open(temp_name, "U")
 45    else:
 46        temp = chunk
 47    regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I )
 48    regexp2 = re.compile( "<IFRAME[^>]*>", re.I )
 49    regexp3 = re.compile( "<FRAMESET[^>]*>", re.I )
 50    regexp4 = re.compile( "<META[^>]*>", re.I )
 51    regexp5 = re.compile( "<SCRIPT[^>]*>", re.I )
 52    lineno = 0
 53    for line in temp:
 54        lineno += 1
 55        matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line ) or regexp5.search( line )
 56        if matches:
 57            if chunk is None:
 58                temp.close()
 59            return True
 60        if lineno > 100:
 61            break
 62    if chunk is None:
 63        temp.close()
 64    return False
 65def check_binary( temp_name ):
 66    is_binary = False
 67    temp = open( temp_name, "U" )
 68    chars_read = 0
 69    for chars in temp:
 70        for char in chars:
 71            chars_read += 1
 72            if ord( char ) > 128:
 73                is_binary = True
 74                break
 75            if chars_read > 100:
 76                break
 77        if chars_read > 100:
 78            break
 79    temp.close()
 80    return is_binary
 81def check_bam( temp_name ):
 82    return Bam().sniff( temp_name )
 83def check_sff( temp_name ):
 84    return Sff().sniff( temp_name )
 85def check_gzip( temp_name ):
 86    # This method returns a tuple of booleans representing ( is_gzipped, is_valid )
 87    # Make sure we have a gzipped file
 88    try:
 89        temp = open( temp_name, "U" )
 90        magic_check = temp.read( 2 )
 91        temp.close()
 92        if magic_check != util.gzip_magic:
 93            return ( False, False )
 94    except:
 95        return ( False, False )
 96    # We support some binary data types, so check if the compressed binary file is valid
 97    # If the file is Bam, it should already have been detected as such, so we'll just check
 98    # for sff format.
 99    try:
100        header = gzip.open( temp_name ).read(4)
101        if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ):
102            return ( True, True )
103    except:
104        return( False, False )
105    CHUNK_SIZE = 2**15 # 32Kb
106    gzipped_file = gzip.GzipFile( temp_name, mode='rb' )
107    chunk = gzipped_file.read( CHUNK_SIZE )
108    gzipped_file.close()
109    # See if we have a compressed HTML file
110    if check_html( temp_name, chunk=chunk ):
111        return ( True, False )
112    return ( True, True )
113
def check_zip( temp_name ):
    """Validate a zip archive of microarray data.

    Returns ( is_zip, known_ext, exactly_one_pheno, gt_one, homogeneous, ext ).
    Rules enforced:
      1. exactly one .txt pheno file
      2. every other member is .cel or .xys, all of the same type
      3. at least two .cel/.xys members (count >= 3 including the pheno file)
    Folder entries (members with no extension) are ignored.
    The archive handle is always closed, even on early rule violations
    (the original leaked it on every early return).
    """
    if not zipfile.is_zipfile( temp_name ):
        return ( False, False, False, False, False, None )
    zip_file = zipfile.ZipFile( temp_name, "r" )
    try:
        hasPheno = False
        count = 0
        test_ext = None
        for name in zip_file.namelist():
            # splitext handles members nested inside folders correctly
            ext = os.path.splitext( name )[1].strip().lower().replace( ".", "" )
            if ext == "":
                # directory entry -- skip
                continue
            count += 1
            if ext not in ( "txt", "cel", "xys" ):
                return ( True, False, False, False, False, ext )
            if ext == "txt":
                if hasPheno:
                    # more than one pheno file
                    return ( True, True, False, False, False, None )
                hasPheno = True
            elif test_ext is None:
                test_ext = ext
            elif ext != test_ext:
                # mixed cel/xys members -> not homogeneous
                return ( True, True, True, True, False, None )
    finally:
        zip_file.close()
    return ( True, True, hasPheno, ( count >= 3 ), True, test_ext )
150
151
def parse_outputs( args ):
    """Parse 'dataset_id:files_path:path' specs from the command line.

    Returns a dict mapping int dataset id -> ( path, files_path ).
    """
    rval = {}
    for arg in args:
        # split on the first two colons only, so path may itself contain ':'
        # (renamed from 'id', which shadowed the builtin)
        dataset_id, files_path, path = arg.split( ':', 2 )
        rval[ int( dataset_id ) ] = ( path, files_path )
    return rval
158def add_file( dataset, json_file, output_path ):
159    data_type = None
160    line_count = None
161    converted_path = None
162    stdout = None
163    link_data_only = dataset.get( 'link_data_only', 'copy_files' )
164    
165    if dataset.type == 'url':
166        try:
167            temp_name, dataset.is_multi_byte = sniff.stream_to_file( urllib.urlopen( dataset.path ), prefix='url_paste' )
168        except Exception, e:
169            file_err( 'Unable to fetch %s\n%s' % ( dataset.path, str( e ) ), dataset, json_file )
170            return
171        dataset.path = temp_name
172    # See if we have an empty file
173    if not os.path.exists( dataset.path ):
174        file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
175        return
176    if not os.path.getsize( dataset.path ) > 0:
177        file_err( 'The uploaded file is empty', dataset, json_file )
178        return
179    if not dataset.type == 'url':
180        # Already set is_multi_byte above if type == 'url'
181        try:
182            dataset.is_multi_byte = util.is_multi_byte( codecs.open( dataset.path, 'r', 'utf-8' ).read( 100 ) )
183        except UnicodeDecodeError, e:
184            dataset.is_multi_byte = False
185    # Is dataset content multi-byte?
186    if dataset.is_multi_byte:
187        data_type = 'multi-byte char'
188        ext = sniff.guess_ext( dataset.path, is_multi_byte=True )
189    # Is dataset content supported sniffable binary?
190    elif check_bam( dataset.path ):
191        ext = 'bam'
192        data_type = 'bam'
193    elif check_sff( dataset.path ):
194        ext = 'sff'
195        data_type = 'sff'
196    else:
197        # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
198        is_gzipped, is_valid = check_gzip( dataset.path )
199        if is_gzipped and not is_valid:
200            file_err( 'The uploaded file contains inappropriate content', dataset, json_file )
201            return
202        elif is_gzipped and is_valid:
203            # We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
204            CHUNK_SIZE = 2**20 # 1Mb   
205            fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_gunzip_' % dataset.dataset_id, dir=os.path.dirname( dataset.path ), text=False )
206            gzipped_file = gzip.GzipFile( dataset.path, 'rb' )
207            while 1:
208                try:
209                    chunk = gzipped_file.read( CHUNK_SIZE )
210                except IOError:
211                    os.close( fd )
212                    os.remove( uncompressed )
213                    file_err( 'Problem decompressing gzipped data', dataset, json_file )
214                    return
215                if not chunk:
216                    break
217                os.write( fd, chunk )
218            os.close( fd )
219            gzipped_file.close()
220            # Replace the gzipped file with the decompressed file
221            shutil.move( uncompressed, dataset.path )
222            dataset.name = dataset.name.rstrip( '.gz' )
223            data_type = 'gzip'
224        if not data_type:
225            # See if we have a zip archive
226            is_zipped, known_ext, one_pheno, gt_one, homogeneous, test_ext = check_zip( dataset.path )
227            if (not is_zipped):
228                file_err("CEL or NimbleGen files must be zipped.", dataset, json_file)
229            if (not known_ext):
230                file_err("Unknown file type in zip: " + test_ext, dataset, json_file)
231            if (not one_pheno):
232               file_err("There must be exactly one .txt pheno file in the zip.", dataset, json_file)
233            if (not gt_one):
234                file_err("There must be more than one CEL or XYS file in the zip.", dataset, json_file)
235            if (not homogeneous):
236                file_err("Except the .txt pheno file, other files must be all CEL or XYS.", dataset, json_file)
237
238            data_type = 'zip'
239            if (test_ext == 'cel'):
240                ext = 'cel.zip'
241                file_type = 'cel.zip'
242            else:
243                ext = 'xys.zip'
244                file_type = 'xys.zip'
245        if not data_type:
246            if check_binary( dataset.path ):
247                # We have a binary dataset, but it is not Bam or Sff
248                data_type = 'binary'
249                #binary_ok = False
250                parts = dataset.name.split( "." )
251                if len( parts ) > 1:
252                    ext = parts[1].strip().lower()
253                    if ext not in unsniffable_binary_formats:
254                        file_err( 'The uploaded file contains inappropriate content', dataset, json_file )
255                        return
256                    elif ext in unsniffable_binary_formats and dataset.file_type != ext:
257                        err_msg = "You must manually set the 'File Format' to '%s' when uploading %s files." % ( ext.capitalize(), ext )
258                        file_err( err_msg, dataset, json_file )
259                        return
260        if not data_type:
261            # We must have a text file
262            if check_html( dataset.path ):
263                file_err( 'The uploaded file contains inappropriate content', dataset, json_file )
264                return
265        if data_type != 'binary' and data_type != 'zip':
266            # don't convert newlines on data we're only going to symlink
267            if link_data_only == 'copy_files':
268                in_place = True
269                if dataset.type in ( 'server_dir', 'path_paste' ):
270                    in_place = False
271                if dataset.space_to_tab:
272                    line_count, converted_path = sniff.convert_newlines_sep2tabs( dataset.path, in_place=in_place )
273                else:
274                    line_count, converted_path = sniff.convert_newlines( dataset.path, in_place=in_place )
275            if dataset.file_type == 'auto':
276                ext = sniff.guess_ext( dataset.path )
277            else:
278                ext = dataset.file_type
279            data_type = ext
280    # Save job info for the framework
281    if ext == 'auto' and dataset.ext:
282        ext = dataset.ext
283    if ext == 'auto':
284        ext = 'data'
285    # Move the dataset to its "real" path
286    if link_data_only == 'link_to_files':
287        pass # data will remain in place
288    elif dataset.type in ( 'server_dir', 'path_paste' ):
289        if converted_path is not None:
290            shutil.copy( converted_path, output_path )
291            try:
292                os.remove( converted_path )
293            except:
294                pass
295        else:
296            # this should not happen, but it's here just in case
297            shutil.copy( dataset.path, output_path )
298    else:
299        shutil.move( dataset.path, output_path )
300    # Write the job info
301    info = dict( type = 'dataset',
302                 dataset_id = dataset.dataset_id,
303                 ext = ext,
304                 stdout = 'uploaded %s file' % data_type,
305                 name = dataset.name,
306                 line_count = line_count )
307    json_file.write( to_json_string( info ) + "\n" )
308    # Groom the dataset content if necessary
309    datatype = Registry().get_datatype_by_extension( ext )
310    datatype.groom_dataset_content( output_path )
311
def add_composite_file( dataset, json_file, output_path, files_path ):
    """Process a composite-type upload: stage each component file into
    files_path, move the primary file to output_path, and append one JSON
    status line to json_file.
    """
    if dataset.composite_files:
        os.mkdir( files_path )
        for name, value in dataset.composite_files.iteritems():
            value = util.bunch.Bunch( **value )
            if dataset.composite_file_paths[ value.name ] is None and not value.optional:
                file_err( 'A required composite data file was not provided (%s)' % name, dataset, json_file )
                # file_err() already reported failure -- do not go on to
                # move files and write a success record (the original
                # 'break' fell through and did both).
                return
            elif dataset.composite_file_paths[ value.name ] is not None:
                if not value.is_binary:
                    # was: uploaded_dataset.composite_files[...] -- a
                    # NameError; 'value' is the Bunch for this component.
                    if value.space_to_tab:
                        sniff.convert_newlines_sep2tabs( dataset.composite_file_paths[ value.name ][ 'path' ] )
                    else:
                        sniff.convert_newlines( dataset.composite_file_paths[ value.name ][ 'path' ] )
                shutil.move( dataset.composite_file_paths[ value.name ][ 'path' ], os.path.join( files_path, name ) )
    # Move the dataset to its "real" path
    shutil.move( dataset.primary_file, output_path )
    # Write the job info
    info = dict( type = 'dataset',
                 dataset_id = dataset.dataset_id,
                 stdout = 'uploaded %s file' % dataset.file_type )
    json_file.write( to_json_string( info ) + "\n" )
334
def __main__():
    """Entry point: read the JSON param file named in argv[1], process each
    dataset line, and write per-dataset status lines to galaxy.json."""
    if len( sys.argv ) < 2:
        print >>sys.stderr, 'usage: upload.py <json paramfile> <output spec> ...'
        sys.exit( 1 )

    output_paths = parse_outputs( sys.argv[2:] )
    json_file = open( 'galaxy.json', 'w' )
    for line in open( sys.argv[1], 'r' ):
        dataset = from_json_string( line )
        dataset = util.bunch.Bunch( **safe_dict( dataset ) )
        try:
            output_path = output_paths[ int( dataset.dataset_id ) ][0]
        except ( KeyError, ValueError ):
            # id missing from the command line, or not an integer
            print >>sys.stderr, 'Output path for dataset %s not found on command line' % dataset.dataset_id
            sys.exit( 1 )
        if dataset.type == 'composite':
            files_path = output_paths[ int( dataset.dataset_id ) ][1]
            add_composite_file( dataset, json_file, output_path, files_path )
        else:
            add_file( dataset, json_file, output_path )
    json_file.close()
    # clean up paramfile -- best effort, it may already be gone
    try:
        os.remove( sys.argv[1] )
    except OSError:
        pass
361
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    __main__()