PageRenderTime 2520ms CodeModel.GetById 192ms app.highlight 2154ms RepoModel.GetById 157ms app.codeStats 1ms

/src/echonest/audio.py

http://echo-nest-remix.googlecode.com/
Python | 1688 lines | 1552 code | 38 blank | 98 comment | 116 complexity | 600447a1cf6b3d7256ca1d8f80b95e91 MD5 | raw file

Large files files are truncated, but you can click here to view the full file

   1"""
   2The main `Echo Nest`_ `Remix API`_ module for manipulating audio files and 
   3their associated `Echo Nest`_ `Analyze API`_ analyses.
   4
   5AudioData, and getpieces by Robert Ochshorn
   6on 2008-06-06.  Some refactoring and everything else by Joshua Lifton
   72008-09-07.  Refactoring by Ben Lacker 2009-02-11. Other contributions
   8by Adam Lindsay.
   9
  10:group Base Classes: AudioAnalysis, AudioRenderable, AudioData, AudioData32
  11:group Audio-plus-Analysis Classes: AudioFile, LocalAudioFile, LocalAnalysis
  12:group Building Blocks: AudioQuantum, AudioSegment, AudioQuantumList, ModifiedRenderable
  13:group Effects: AudioEffect, LevelDB, AmplitudeFactor, TimeTruncateFactor, TimeTruncateLength, Simultaneous
  14:group Exception Classes: FileTypeError, EchoNestRemixError
  15
  16:group Audio helper functions: getpieces, mix, assemble, megamix
  17:group ffmpeg helper functions: ffmpeg, settings_from_ffmpeg, ffmpeg_error_check
  18:group Utility functions: chain_from_mixed, _dataParser, _attributeParser, _segmentsParser
  19
  20.. _Analyze API: http://developer.echonest.com/pages/overview?version=2
  21.. _Remix API: http://code.google.com/p/echo-nest-remix/
  22.. _Echo Nest: http://the.echonest.com/
  23"""
  24
  25__version__ = "$Revision: 0 $"
  26# $Source$
  27
  28import hashlib
  29import numpy
  30import os
  31import sys
  32import StringIO
  33import struct
  34import subprocess
  35import tempfile
  36import wave
  37
  38from pyechonest import track
  39import pyechonest.util
  40import echonest.selection as selection
  41import pyechonest.config as config
  42#from echonest.support import stupidxml
  43import xml.etree.ElementTree as etree
  44import xml.dom.minidom as minidom
  45import weakref
  46
  47class AudioAnalysis(object):
  48    """
  49    This class uses (but does not wrap) `pyechonest.track` to allow 
  50    transparent caching of the audio analysis of an audio file.
  51    
  52    For example, the following script will display the bars of a track
  53    twice::
  54    
  55        from echonest import *
  56        a = audio.AudioAnalysis('YOUR_TRACK_ID_HERE')
  57        a.bars
  58        a.bars
  59    
  60    The first time `a.bars` is called, a network request is made of the
  61    `Echo Nest`_ `Analyze API`_.  The second time time `a.bars` is called, the
  62    cached value is returned immediately.
  63    
  64    An `AudioAnalysis` object can be created using an existing ID, as in
  65    the example above, or by specifying the audio file to upload in
  66    order to create the ID, as in::
  67    
  68        a = audio.AudioAnalysis('FULL_PATH_TO_AUDIO_FILE')
  69    
  70    .. _Analyze API: http://developer.echonest.com/pages/overview?version=2
  71    .. _Echo Nest: http://the.echonest.com/
  72    """
  73    
  74    def __init__(self, path_or_identifier):
  75        """
  76        Constructor.  If the argument is a valid local path or a URL,
  77        the track ID is generated by uploading the file to the `Echo Nest`_ 
  78        `Analyze API`_\.  Otherwise, the argument is assumed to be
  79        the track ID.
  80        
  81        :param path_or_identifier: A string representing either a path to a local 
  82            file, or the ID of a file that has already 
  83            been uploaded for analysis.
  84        
  85        .. _Analyze API: http://developer.echonest.com/docs/v4/track.html 
  86        .. _Echo Nest: http://the.echonest.com/
  87        """
  88        
  89        if type(path_or_identifier) is not str:
  90            # Argument is invalid.
  91            raise TypeError("Argument 'path_or_identifier' must be a string \
  92                            representing either a filename, track ID, or MD5.")
  93
  94        # see if path_or_identifier is a path or an ID
  95        if os.path.isfile(path_or_identifier):
  96            # it's a filename
  97            self.pyechonest_track = track.track_from_filename(path_or_identifier)
  98        else:
  99            if path_or_identifier.startswith('music://') or \
 100                    (path_or_identifier.startswith('TR') and \
 101                    len(path_or_identifier) == 18):
 102                # it's an id
 103                self.pyechonest_track = track.track_from_id(path_or_identifier)
 104            elif len(path_or_identifier) == 32:
 105                # it's an md5
 106                self.pyechonest_track = track.track_from_md5(path_or_identifier)
 107
 108        if self.pyechonest_track is None:
 109            raise EchoNestRemixError('Could not find track %s' % path_or_identifier)
 110
 111        self.source = None
 112        
 113        self._bars = None
 114        self._beats = None
 115        self._tatums = None
 116        self._sections = None
 117        self._segments = None
 118
 119        self.identifier = self.pyechonest_track.id
 120        self.metadata   = self.pyechonest_track.meta
 121
 122
 123        for attribute in ('time_signature', 'mode', 'tempo', 'key'):
 124            d = {}
 125            d['value']      = getattr(self.pyechonest_track, attribute)
 126            d['confidence'] = getattr(self.pyechonest_track, attribute + '_confidence')
 127            setattr(self, attribute, d)
 128
 129        for attribute in ('end_of_fade_in', 'start_of_fade_out', 'duration', 'loudness'):
 130            setattr(self, attribute, getattr(self.pyechonest_track, attribute))
 131
 132    @property
 133    def bars(self):
 134        if self._bars is None:
 135            self._bars = _dataParser('bar', self.pyechonest_track.bars)
 136            self._bars.attach(self)
 137        return self._bars
 138
 139    @property
 140    def beats(self):
 141        if self._beats is None:
 142            self._beats = _dataParser('beat', self.pyechonest_track.beats)
 143            self._beats.attach(self)
 144        return self._beats
 145
 146    @property
 147    def tatums(self):
 148        if self._tatums is None:
 149            self._tatums = _dataParser('tatum', self.pyechonest_track.tatums)
 150            self._tatums.attach(self)
 151        return self._tatums
 152    
 153    @property
 154    def sections(self):
 155        if self._sections is None:
 156            self._sections = _attributeParser('section', self.pyechonest_track.sections)
 157            self._sections.attach(self)
 158        return self._sections
 159    
 160    @property
 161    def segments(self):
 162        if self._segments is None:
 163            self._segments = _segmentsParser(self.pyechonest_track.segments)
 164            self._segments.attach(self)
 165        return self._segments
 166    
 167    def __getstate__(self):
 168        """
 169        Eliminates the circular reference for pickling.
 170        """
 171        dictclone = self.__dict__.copy()
 172        del dictclone['source']
 173        return dictclone
 174    
 175    def __setstate__(self, state):
 176        """
 177        Recreates circular references after unpickling.
 178        """
 179        self.__dict__.update(state)
 180        if hasattr(AudioAnalysis, 'CACHED_VARIABLES'):
 181            for cached_var in AudioAnalysis.CACHED_VARIABLES:
 182                if type(object.__getattribute__(self, cached_var)) == AudioQuantumList:
 183                    object.__getattribute__(self, cached_var).attach(self)
 184
 185class AudioRenderable(object):
 186    """
 187    An object that gives an `AudioData` in response to a call to its `render`\()
 188    method.
 189    Intended to be an abstract class that helps enforce the `AudioRenderable` 
 190    protocol. Picked up a couple of convenience methods common to many descendants.
 191    
 192    Every `AudioRenderable` must provide three things:
 193    
 194    render()
 195        A method returning the `AudioData` for the object. The rhythmic duration (point
 196        at which any following audio is appended) is signified by the `endindex` accessor,
 197        measured in samples.
 198    source
 199        An accessor pointing to the `AudioData` that contains the original sample data of
 200        (a superset of) this audio object.
 201    duration
 202        An accessor returning the rhythmic duration (in seconds) of the audio object.
 203    """
 204    def resolve_source(self, alt):
 205        """
 206        Given an alternative, fallback `alt` source, return either `self`'s
 207        source or the alternative. Throw an informative error if no source
 208        is found.
 209        
 210        Utility code that ended up being replicated in several places, so
 211        it ended up here. Not necessary for use in the RenderableAudioObject
 212        protocol.
 213        """
 214        if hasattr(self, 'source'):
 215            source = self.source
 216        else:
 217            if isinstance(alt, AudioData):
 218                source = alt
 219            else:
 220                print >> sys.stderr, self.__repr__()
 221                raise EchoNestRemixError("%s has no implicit or explicit source \
 222                                                during rendering." %
 223                                                (self.__class__.__name__, ))
 224        return source
 225    
 226    @staticmethod
 227    def init_audio_data(source, num_samples):
 228        """
 229        Convenience function for rendering: return a pre-allocated, zeroed 
 230        `AudioData`.
 231        """
 232        if source.numChannels > 1:
 233            newchans = source.numChannels
 234            newshape = (num_samples, newchans)
 235        else:
 236            newchans = 1
 237            newshape = (num_samples,)
 238        return AudioData32(shape=newshape, sampleRate=source.sampleRate, 
 239                            numChannels=newchans, defer=False)
 240        
 241    
 242    def sources(self):
 243        return set([self.source])
 244    
 245    def encode(self, filename):
 246        """
 247        Shortcut function that takes care of the need to obtain an `AudioData`
 248        object first, through `render`.
 249        """
 250        self.render().encode(filename)
 251
 252
class AudioData(AudioRenderable):
    """
    Handles audio data transparently. A smart audio container
    with accessors that include:
        
    sampleRate
        samples per second
    numChannels
        number of channels
    data
        a `numpy.array`_ 
        
    .. _numpy.array: http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html
    """
    def __init__(self, filename=None, ndarray = None, shape=None, sampleRate=None, numChannels=None, defer=False, verbose=True):
        """
        Given an input `ndarray`, import the sample values and shape 
        (if none is specified) of the input `numpy.array`.
        
        Given a `filename` (and an input ndarray), use ffmpeg to convert
        the file to wave, then load the file into the data, 
        auto-detecting the sample rate, and number of channels.
        
        :param filename: a path to an audio file for loading its sample 
            data into the AudioData.data
        :param ndarray: a `numpy.array`_ instance with sample data
        :param shape: a tuple of array dimensions
        :param sampleRate: sample rate, in Hz
        :param numChannels: number of channels
        :param defer: if True, postpone reading sample data until `load`
            is called (or until the data is first indexed)
        :param verbose: if True, echo ffmpeg commands and file deletions
            to stderr
        
        .. _numpy.array: http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html
        """
        self.verbose = verbose
        if (filename is not None) and (ndarray is None) :
            if sampleRate is None or numChannels is None:
                # force sampleRate and numChannels to 44100 hz, 2
                sampleRate, numChannels = 44100, 2
                # Probe the file with ffmpeg (no output file) and parse its
                # stderr for the actual sample rate / channel count.
                parsestring = ffmpeg(filename, overwrite=False, verbose=self.verbose)
                ffmpeg_error_check(parsestring[1])
                sampleRate, numChannels = settings_from_ffmpeg(parsestring[1])
        self.defer = defer
        self.filename = filename
        self.sampleRate = sampleRate
        self.numChannels = numChannels
        self.convertedfile = None
        self.endindex = 0
        # Allocate a zeroed 16-bit buffer now unless loading is deferred.
        if shape is None and isinstance(ndarray, numpy.ndarray) and not self.defer:
            self.data = numpy.zeros(ndarray.shape, dtype=numpy.int16)
        elif shape is not None and not self.defer:
            self.data = numpy.zeros(shape, dtype=numpy.int16)
        elif not self.defer and self.filename:
            self.data = None
            self.load()
        else:
            self.data = None
        # Copy any caller-supplied samples into the freshly zeroed buffer.
        if ndarray is not None and self.data is not None:
            self.endindex = len(ndarray)
            self.data[0:self.endindex] = ndarray
    
    def load(self):
        # Read sample data from self.filename into self.data, converting
        # through ffmpeg to a temporary wave file when necessary.
        if isinstance(self.data, numpy.ndarray):
            return
        temp_file_handle = None
        # A 44100 Hz stereo wave file can be read directly; anything else
        # goes through ffmpeg first (result cached in self.convertedfile).
        if self.filename.lower().endswith(".wav") and (self.sampleRate, self.numChannels) == (44100, 2):
            file_to_read = self.filename
        elif self.convertedfile:
            file_to_read = self.convertedfile
        else:
            temp_file_handle, self.convertedfile = tempfile.mkstemp(".wav")
            result = ffmpeg(self.filename, self.convertedfile, overwrite=True, 
                numChannels=self.numChannels, sampleRate=self.sampleRate, verbose=self.verbose)
            ffmpeg_error_check(result[1])
            file_to_read = self.convertedfile
        
        w = wave.open(file_to_read, 'r')
        numFrames = w.getnframes()
        raw = w.readframes(numFrames)
        sampleSize = numFrames * self.numChannels
        # "<h" = little-endian signed 16-bit samples.
        data = numpy.frombuffer(raw, dtype="<h", count=sampleSize)
        ndarray = numpy.array(data, dtype=numpy.int16)
        if self.numChannels > 1:
            # Deinterleave into (frames, channels).
            ndarray.resize((numFrames, self.numChannels))
        self.data = numpy.zeros(ndarray.shape, dtype=numpy.int16)
        self.endindex = 0
        if ndarray is not None:
            self.endindex = len(ndarray)
            self.data = ndarray
        if temp_file_handle is not None:
            os.close(temp_file_handle)
        w.close()
        
    def __getitem__(self, index):
        """
        Fetches a frame or slice. Returns an individual frame (if the index 
        is a time offset float or an integer sample number) or a slice if 
        the index is an `AudioQuantum` (or quacks like one).
        """
        if not isinstance(self.data, numpy.ndarray) and self.defer:
            self.load()
        # A float index is a time offset in seconds.
        if isinstance(index, float):
            index = int(index*self.sampleRate)
        elif hasattr(index, "start") and hasattr(index, "duration"):
            # AudioQuantum-like: convert to a (start, start+duration) slice
            # in seconds; getslice() scales to samples.
            index =  slice(float(index.start), index.start + index.duration)
        
        if isinstance(index, slice):
            # A slice whose endpoints are themselves AudioQuantum-like spans
            # from the start of the first to the end of the last.
            if ( hasattr(index.start, "start") and 
                 hasattr(index.stop, "duration") and 
                 hasattr(index.stop, "start") ) :
                index = slice(index.start.start, index.stop.start + index.stop.duration)
        
        if isinstance(index, slice):
            return self.getslice(index)
        else:
            return self.getsample(index)
    
    def getslice(self, index):
        "Help `__getitem__` return a new AudioData for a given slice"
        if not isinstance(self.data, numpy.ndarray) and self.defer:
            self.load()
        # Float endpoints are in seconds; scale them to sample indices.
        if isinstance(index.start, float):
            index = slice(int(index.start * self.sampleRate), 
                            int(index.stop * self.sampleRate), index.step)
        return AudioData(None, self.data[index], sampleRate=self.sampleRate, 
                            numChannels=self.numChannels, defer=False)
    
    def getsample(self, index):
        """
        Help `__getitem__` return a frame (all channels for a given 
        sample index)
        """
        if not isinstance(self.data, numpy.ndarray) and self.defer:
            self.load()
        if isinstance(index, int):
            return self.data[index]
        else:
            #let the numpy array interface be clever
            return AudioData(None, self.data[index], defer=False)
    
    def pad_with_zeros(self, num_samples):
        # Grow self.data by num_samples zeroed frames (no-op if <= 0).
        if num_samples > 0:
            if self.numChannels == 1:
                extra_shape = (num_samples,)
            else:
                extra_shape = (num_samples, self.numChannels)
            self.data = numpy.append(self.data, 
                                     numpy.zeros(extra_shape, dtype=numpy.int16), axis=0)
        
    def append(self, another_audio_data):
        "Appends the input to the end of this `AudioData`."
        # Grow the buffer just enough to hold the incoming samples past
        # the current logical end (endindex).
        extra = len(another_audio_data.data) - (len(self.data) - self.endindex) 
        self.pad_with_zeros(extra)
        self.data[self.endindex : self.endindex + len(another_audio_data)] = self.data[self.endindex : self.endindex + len(another_audio_data)] + another_audio_data.data
        # NOTE(review): advances by the other object's endindex, while the
        # write above spans len(another_audio_data) frames -- these differ
        # when the other buffer is not full; confirm which is intended.
        self.endindex += another_audio_data.endindex
    
    def sum(self, another_audio_data):
        # Mixes (adds) the input's samples into this AudioData in place.
        extra = len(another_audio_data.data) - len(self.data)
        self.pad_with_zeros(extra)
        # NOTE(review): the "- 1" means the final frame of the shorter
        # buffer is never mixed in -- looks like an off-by-one; confirm
        # before changing.
        compare_limit = min(len(another_audio_data.data), len(self.data)) - 1
        self.data[ : compare_limit] += another_audio_data.data[ : compare_limit]
    
    def add_at(self, time, another_audio_data):
        """
        Adds the input `another_audio_data` to this `AudioData` 
        at the `time` specified in seconds.
        """
        offset = int(time * self.sampleRate)
        extra = offset + len(another_audio_data.data) - len(self.data)
        self.pad_with_zeros(extra)
        # Upmix a narrower input by duplicating its samples across our
        # channels so the shapes line up for the in-place add.
        if another_audio_data.numChannels < self.numChannels:
            another_audio_data.data = numpy.repeat(another_audio_data.data, self.numChannels).reshape(len(another_audio_data), self.numChannels)
        self.data[offset : offset + len(another_audio_data.data)] += another_audio_data.data 
    
    def __len__(self):
        # Length of the underlying buffer in frames (0 when unloaded).
        if self.data is not None:
            return len(self.data)
        else:
            return 0

    def __add__(self, other):
        """Supports stuff like this: sound3 = sound1 + sound2"""
        return assemble([self, other], numChannels=self.numChannels, 
                            sampleRate=self.sampleRate)
        
    def encode(self, filename=None, mp3=None):
        """
        Outputs an MP3 or WAVE file to `filename`.
        Format is determined by `mp3` parameter.
        """
        # NOTE(review): when mp3 is explicitly False but the filename does
        # not end in .wav, this coerces mp3 back to True -- the filename
        # extension wins over the caller's flag; confirm that is intended.
        if not mp3 and filename.lower().endswith('.wav'):
            mp3 = False
        else:
            mp3 = True
        if mp3:
            foo, tempfilename = tempfile.mkstemp(".wav")
            os.close(foo)
        else:
            tempfilename = filename
        fid = open(tempfilename, 'wb')
        # Hand-rolled RIFF/WAVE writer.
        # Based on Scipy svn
        # http://projects.scipy.org/pipermail/scipy-svn/2007-August/001189.html
        fid.write('RIFF')
        fid.write(struct.pack('<i',0)) # write a 0 for length now, we'll go back and add it later
        fid.write('WAVE')
        # fmt chunk
        fid.write('fmt ')
        if self.data.ndim == 1:
            noc = 1
        else:
            noc = self.data.shape[1]
        bits = self.data.dtype.itemsize * 8
        # NOTE(review): "bits / 8" relies on Python 2 integer division;
        # under true division this would feed floats to struct.pack.
        sbytes = self.sampleRate * (bits / 8) * noc
        ba = noc * (bits / 8)
        fid.write(struct.pack('<ihHiiHH', 16, 1, noc, self.sampleRate, sbytes, ba, bits))
        # data chunk
        fid.write('data')
        fid.write(struct.pack('<i', self.data.nbytes))
        self.data.tofile(fid)
        # Determine file size and place it in correct
        # position at start of the file. 
        size = fid.tell()
        fid.seek(4)
        fid.write(struct.pack('<i', size - 8))
        fid.close()
        if not mp3:
            return tempfilename
        # now convert it to mp3
        if not filename.lower().endswith('.mp3'):
            filename = filename + '.mp3'
        try:
            bitRate = config.MP3_BITRATE
        except (NameError, AttributeError):
            # Fall back to 128 kbps when no bitrate is configured.
            bitRate = 128
        parsestring = ffmpeg(tempfilename, filename, bitRate=bitRate, verbose=self.verbose)
        ffmpeg_error_check(parsestring[1])
        if tempfilename != filename:
            if self.verbose:
                print >> sys.stderr, "Deleting: %s" % tempfilename
            os.remove(tempfilename)
        return filename
    
    def unload(self):
        # Drop the in-memory samples and delete the cached converted file.
        self.data = None
        if self.convertedfile:
            if self.verbose:
                print >> sys.stderr, "Deleting: %s" % self.convertedfile
            os.remove(self.convertedfile)
            self.convertedfile = None
    
    def render(self, start=0.0, to_audio=None, with_source=None):
        # With no target, an AudioData renders as itself. Otherwise mix
        # this data into `to_audio` at `start` seconds -- but only when
        # this object is the source currently being rendered.
        if not to_audio:
            return self
        if with_source != self:
            return
        to_audio.add_at(start, self)
        return
    
    @property
    def duration(self):
        # Logical duration in seconds (endindex frames at sampleRate).
        return float(self.endindex) / self.sampleRate
    
    @property
    def source(self):
        # An AudioData is its own source (see AudioRenderable protocol).
        return self
    
 517
 518class AudioData32(AudioData):
 519    """A 32-bit variant of AudioData, intended for data collection on 
 520    audio rendering with headroom."""
 521    def __init__(self, filename=None, ndarray = None, shape=None, sampleRate=None, numChannels=None, defer=False, verbose=True):
 522        """
 523        Special form of AudioData to allow for headroom when collecting samples.
 524        """
 525        self.verbose = verbose
 526        if (filename is not None) and (ndarray is None) :
 527            if sampleRate is None or numChannels is None:
 528                # force sampleRate and numChannels to 44100 hz, 2
 529                sampleRate, numChannels = 44100, 2
 530                parsestring = ffmpeg(filename, overwrite=False, verbose=self.verbose)
 531                ffmpeg_error_check(parsestring[1])
 532                sampleRate, numChannels = settings_from_ffmpeg(parsestring[1])
 533        self.defer = defer
 534        self.filename = filename
 535        self.sampleRate = sampleRate
 536        self.numChannels = numChannels
 537        self.convertedfile = None
 538        self.normalized = None
 539        if shape is None and isinstance(ndarray, numpy.ndarray) and not self.defer:
 540            self.data = numpy.zeros(ndarray.shape, dtype=numpy.int32)
 541        elif shape is not None and not self.defer:
 542            self.data = numpy.zeros(shape, dtype=numpy.int32)
 543        elif not self.defer and self.filename:
 544            self.load()
 545        else:
 546            self.data = None
 547        self.endindex = 0
 548        if ndarray is not None and self.data is not None:
 549            self.endindex = len(ndarray)
 550            self.data[0:self.endindex] = ndarray
 551    
 552    def load(self):
 553        if isinstance(self.data, numpy.ndarray):
 554            return
 555        temp_file_handle = None
 556        if self.filename.lower().endswith(".wav") and (self.sampleRate, self.numChannels) == (44100, 2):
 557            file_to_read = self.filename
 558        elif self.convertedfile:
 559            file_to_read = self.convertedfile
 560        else:
 561            temp_file_handle, self.convertedfile = tempfile.mkstemp(".wav")
 562            result = ffmpeg(self.filename, self.convertedfile, overwrite=True, 
 563                numChannels=self.numChannels, sampleRate=self.sampleRate, verbose=self.verbose)
 564            ffmpeg_error_check(result[1])
 565            file_to_read = self.convertedfile
 566        
 567        w = wave.open(file_to_read, 'r')
 568        numFrames = w.getnframes()
 569        raw = w.readframes(numFrames)
 570        sampleSize = numFrames * self.numChannels
 571        data = numpy.frombuffer(raw, dtype="<h", count=sampleSize)
 572        ndarray = numpy.array(data, dtype=numpy.int16)
 573        if self.numChannels > 1:
 574            ndarray.resize((numFrames, self.numChannels))
 575        self.data = numpy.zeros(ndarray.shape, dtype=numpy.int32)
 576        self.endindex = 0
 577        if ndarray is not None:
 578            self.endindex = len(ndarray)
 579            self.data[0:self.endindex] = ndarray
 580        if temp_file_handle is not None:
 581            os.close(temp_file_handle)
 582        w.close()
 583    
 584    def encode(self, filename=None, mp3=None):
 585        """
 586        Outputs an MP3 or WAVE file to `filename`.
 587        Format is determined by `mp3` parameter.
 588        """
 589        self.normalize()
 590        temp_file_handle = None
 591        if not mp3 and filename.lower().endswith('.wav'):
 592            mp3 = False
 593        else:
 594            mp3 = True
 595        if mp3:
 596            temp_file_handle, tempfilename = tempfile.mkstemp(".wav")        
 597        else:
 598            tempfilename = filename
 599        fid = open(tempfilename, 'wb')
 600        # Based on Scipy svn
 601        # http://projects.scipy.org/pipermail/scipy-svn/2007-August/001189.html
 602        fid.write('RIFF')
 603        fid.write(struct.pack('<i',0)) # write a 0 for length now, we'll go back and add it later
 604        fid.write('WAVE')
 605        # fmt chunk
 606        fid.write('fmt ')
 607        if self.normalized.ndim == 1:
 608            noc = 1
 609        else:
 610            noc = self.normalized.shape[1]
 611        bits = self.normalized.dtype.itemsize * 8
 612        sbytes = self.sampleRate*(bits / 8)*noc
 613        ba = noc * (bits / 8)
 614        fid.write(struct.pack('<ihHiiHH', 16, 1, noc, self.sampleRate, sbytes, ba, bits))
 615        # data chunk
 616        fid.write('data')
 617        fid.write(struct.pack('<i', self.normalized.nbytes))
 618        self.normalized.tofile(fid)
 619        # Determine file size and place it in correct
 620        # position at start of the file. 
 621        size = fid.tell()
 622        fid.seek(4)
 623        fid.write(struct.pack('<i', size-8))
 624        fid.close()
 625        self.normalized = None
 626        if not mp3:
 627            return tempfilename
 628        # now convert it to mp3
 629        if not filename.lower().endswith('.mp3'):
 630            filename = filename + '.mp3'
 631        try:
 632            bitRate = config.MP3_BITRATE
 633        except (NameError, AttributeError):
 634            bitRate = 128
 635        parsestring = ffmpeg(tempfilename, filename, bitRate=bitRate, verbose=self.verbose)
 636        ffmpeg_error_check(parsestring[1])
 637        if tempfilename != filename:
 638            if self.verbose:
 639                print >> sys.stderr, "Deleting: %s" % tempfilename
 640            os.remove(tempfilename)
 641        if temp_file_handle is not None:
 642            os.close(temp_file_handle)
 643        return filename
 644    
 645    def normalize(self):
 646        """Return to 16-bit for encoding."""
 647        if self.numChannels == 1:
 648            self.normalized = numpy.zeros((self.data.shape[0],), dtype=numpy.int16)
 649        else:
 650            self.normalized = numpy.zeros((self.data.shape[0], self.data.shape[1]), dtype=numpy.int16)
 651        
 652        factor = 32767.0 / numpy.max(numpy.absolute(self.data.flatten()))
 653        # If the max was 32768, don't bother scaling:
 654        if factor < 1.000031:
 655            self.normalized[:len(self.data)] += self.data * factor
 656        else:
 657            self.normalized[:len(self.data)] += self.data
 658    
 659    def pad_with_zeros(self, num_samples):
 660        if num_samples > 0:
 661            if self.numChannels == 1:
 662                extra_shape = (num_samples,)
 663            else:
 664                extra_shape = (num_samples, self.numChannels)
 665            self.data = numpy.append(self.data, 
 666                                     numpy.zeros(extra_shape, dtype=numpy.int32), axis=0)
 667    
 668
 669def get_os():
 670    """returns is_linux, is_mac, is_windows"""
 671    if hasattr(os, 'uname'):
 672        if os.uname()[0] == "Darwin":
 673            return False, True, False
 674        return True, False, False
 675    return False, False, True
 676    
 677def ffmpeg(infile, outfile=None, overwrite=True, bitRate=None, numChannels=None, sampleRate=None, verbose=True):
 678    """
 679    Executes ffmpeg through the shell to convert or read media files.
 680    """
 681    command = "en-ffmpeg"
 682    if overwrite:
 683        command += " -y"
 684    command += " -i \"" + infile + "\""
 685    if bitRate is not None:
 686        command += " -ab " + str(bitRate) + "k"
 687    if numChannels is not None:
 688        command += " -ac " + str(numChannels)
 689    if sampleRate is not None:
 690        command += " -ar " + str(sampleRate)
 691    if outfile is not None:
 692        command += " \"%s\"" % outfile
 693    if verbose:
 694        print >> sys.stderr, command
 695    
 696    (lin, mac, win) = get_os()
 697    if(not win):
 698        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
 699    else:
 700        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)        
 701    return_val = p.communicate()
 702    return return_val
 703
 704def settings_from_ffmpeg(parsestring):
 705    """
 706    Parses the output of ffmpeg to determine sample rate and frequency of 
 707    an audio file.
 708    """
 709    parse = parsestring.split('\n')
 710    freq, chans = 44100, 2
 711    for line in parse:
 712        if "Stream #0" in line and "Audio" in line:
 713            segs = line.split(", ")
 714            for s in segs:
 715                if "Hz" in s:
 716                    #print "Found: "+str(s.split(" ")[0])+"Hz"
 717                    freq = int(s.split(" ")[0])
 718                elif "stereo" in s:
 719                    #print "stereo"
 720                    chans = 2
 721                elif "2 channels" in s:
 722                    #print "stereo"
 723                    chans = 2
 724                elif "mono" in s:
 725                    #print "mono"
 726                    chans = 1
 727                elif "1 channels" in s:
 728                    #print "mono"
 729                    chans = 1
 730    return freq, chans
 731
 732ffmpeg_install_instructions = """
 733en-ffmpeg not found! Please make sure ffmpeg is installed and create a link as follows:
 734    sudo ln -s `which ffmpeg` /usr/local/bin/en-ffmpeg
 735"""
 736def ffmpeg_error_check(parsestring):
 737    "Looks for known errors in the ffmpeg output"
 738    parse = parsestring.split('\n')
 739    error_cases = [ "Unknown format", # ffmpeg can't figure out format of input file
 740                    "error occur", # an error occurred
 741                    "Could not open", # user doesn't have permission to access file
 742                    "not found" # could not find encoder for output file
 743                    ]
 744    for num, line in enumerate(parse):
 745        if "command not found" in line:
 746            raise RuntimeError(ffmpeg_install_instructions)
 747        for error in error_cases:
 748            if error in line:
 749                report = "\n\t".join(parse[num:])
 750                raise RuntimeError("ffmpeg conversion error:\n\t" + report)
 751
 752def getpieces(audioData, segs):
 753    """
 754    Collects audio samples for output.
 755    Returns a new `AudioData` where the new sample data is assembled
 756    from the input audioData according to the time offsets in each 
 757    of the elements of the input segs (commonly an `AudioQuantumList`).
 758    
 759    :param audioData: an `AudioData` object
 760    :param segs: an iterable containing objects that may be accessed
 761        as slices or indices for an `AudioData`
 762    """
 763    #calculate length of new segment
 764    audioData.data = None
 765    audioData.load()
 766    dur = 0
 767    for s in segs:
 768        dur += int(s.duration * audioData.sampleRate)
 769    # if I wanted to add some padding to the length, I'd do it here
 770    
 771    #determine shape of new array
 772    if len(audioData.data.shape) > 1:
 773        newshape = (dur, audioData.data.shape[1])
 774        newchans = audioData.data.shape[1]
 775    else:
 776        newshape = (dur,)
 777        newchans = 1
 778    
 779    #make accumulator segment
 780    newAD = AudioData(shape=newshape, sampleRate=audioData.sampleRate, 
 781                    numChannels=newchans, defer=False, verbose=audioData.verbose)
 782    
 783    #concatenate segs to the new segment
 784    for s in segs:
 785        newAD.append(audioData[s])
 786    # audioData.unload()
 787    return newAD
 788
 789def assemble(audioDataList, numChannels=1, sampleRate=44100, verbose=True):
 790    """
 791    Collects audio samples for output.
 792    Returns a new `AudioData` object assembled
 793    by concatenating all the elements of audioDataList.
 794    
 795    :param audioDatas: a list of `AudioData` objects
 796    """
 797    if numChannels == 1:
 798        new_shape = (sum([len(x.data) for x in audioDataList]),)
 799    else:
 800        new_shape = (sum([len(x.data) for x in audioDataList]),numChannels)
 801    new_data = AudioData(shape=new_shape, numChannels=numChannels, 
 802                        sampleRate=sampleRate, defer=False, verbose=verbose)
 803    for ad in audioDataList:
 804        if not isinstance(ad, AudioData):
 805            raise TypeError('Encountered something other than an AudioData')
 806        new_data.append(ad)
 807    return new_data
 808
 809def mix(dataA,dataB,mix=0.5):
 810    """
 811    Mixes two `AudioData` objects. Assumes they have the same sample rate
 812    and number of channels.
 813    
 814    Mix takes a float 0-1 and determines the relative mix of two audios.
 815    i.e., mix=0.9 yields greater presence of dataA in the final mix.
 816    """
 817    if dataA.endindex > dataB.endindex:
 818        newdata = AudioData(ndarray=dataA.data, sampleRate=dataA.sampleRate, numChannels=dataA.numChannels, defer=False)
 819        newdata.data = newdata.data * float(mix)
 820        newdata.data[:dataB.endindex] += dataB.data[:] * (1 - float(mix))
 821    else:
 822        newdata = AudioData(ndarray=dataB.data, sampleRate=dataB.sampleRate, numChannels=dataB.numChannels, defer=False)
 823        newdata.data = newdata.data * (1.0 - float(mix))
 824        newdata.data[:dataA.endindex] += dataA.data[:] * float(mix)
 825    return newdata
 826
 827def megamix(dataList):
 828    """
 829    Mix together any number of `AudioData` objects. Keep the shape of
 830    the first one in the list. Assume they all have the same sample rate
 831    and number of channels.
 832    """
 833    if not isinstance(dataList, list):
 834        raise TypeError('input must be a list of AudioData objects')
 835    newdata = AudioData(shape=dataList[0].data.shape, sampleRate=dataList[0].sampleRate, 
 836                            numChannels=dataList[0].numChannels, defer=False)
 837    for adata in dataList:
 838        if not isinstance(adata, AudioData):
 839            raise TypeError('input must be a list of AudioData objects')
 840        if len(adata) > len(newdata):
 841            newseg = AudioData(ndarray=adata[:newdata.endindex].data, 
 842                                numChannels=newdata.numChannels, 
 843                                sampleRate=newdata.sampleRate, defer=False)
 844            newseg.endindex = newdata.endindex
 845        else:
 846            newseg = AudioData(ndarray=adata.data, 
 847                                numChannels=newdata.numChannels, 
 848                                sampleRate=newdata.sampleRate, defer=False)
 849            newseg.endindex = adata.endindex
 850        newdata.data[:newseg.endindex] += newseg.data / float(len(dataList))
 851    newdata.endindex = len(newdata)
 852    return newdata
 853
 854
 855class LocalAudioFile(AudioData):
 856    """
 857    The basic do-everything class for remixing. Acts as an `AudioData` 
 858    object, but with an added `analysis` selector which is an
 859    `AudioAnalysis` object. It conditianally uploads the file
 860    it was initialized with. If the file is already known to the 
 861    Analyze API, then it does not bother uploading the file.
 862    """
 863    def __init__(self, filename, verbose=True, defer=False):
 864        """
 865        :param filename: path to a local MP3 file
 866        """
 867        AudioData.__init__(self, filename=filename, verbose=verbose, defer=defer)
 868        track_md5 = hashlib.md5(file(filename, 'rb').read()).hexdigest()
 869        if verbose:
 870            print >> sys.stderr, "Computed MD5 of file is " + track_md5 
 871        try:
 872            if verbose:
 873                print >> sys.stderr, "Probing for existing analysis"
 874            tempanalysis = AudioAnalysis(track_md5)
 875        except Exception, e:
 876            if verbose:
 877                print >> sys.stderr, "Analysis not found. Uploading..."
 878            tempanalysis = AudioAnalysis(filename)
 879
 880        self.analysis = tempanalysis
 881        self.analysis.source = self
 882    
 883    def toxml(self, context=None):
 884       raise NotImplementedError 
 885
 886    @property
 887    def duration(self):
 888        """
 889        Since we consider `AudioFile` to be an evolved version of 
 890        `AudioData`, we return the measured duration from the analysis.
 891        """
 892        return self.analysis.duration
 893    
 894    def __setstate__(self, state):
 895        """
 896        Recreates circular reference after unpickling.
 897        """
 898        self.__dict__.update(state)
 899        self.analysis.source = weakref.proxy(self)
 900
 901
 902class LocalAnalysis(object):
 903    """
 904    Like `LocalAudioFile`, it conditionally uploads the file with which
 905    it was initialized. Unlike `LocalAudioFile`, it is not a subclass of 
 906    `AudioData`, so contains no sample data.
 907    """
 908    def __init__(self, filename, verbose=True):
 909        """
 910        :param filename: path to a local MP3 file
 911        """
 912
 913        track_md5 = hashlib.md5(file(filename, 'rb').read()).hexdigest()
 914        if verbose:
 915            print >> sys.stderr, "Computed MD5 of file is " + track_md5 
 916        try:
 917            if verbose:
 918                print >> sys.stderr, "Probing for existing analysis"
 919            tempanalysis = AudioAnalysis(track_md5)
 920        except Exception, e:
 921            print e
 922            if verbose:
 923                print >> sys.stderr, "Analysis not found. Uploading..."
 924            tempanalysis = AudioAnalysis(filename)
 925
 926        self.analysis = tempanalysis
 927        self.analysis.source = self
 928
 929class AudioQuantum(AudioRenderable) :
 930    """
 931    A unit of musical time, identified at minimum with a start time and 
 932    a duration, both in seconds. It most often corresponds with a `section`,
 933    `bar`, `beat`, `tatum`, or (by inheritance) `segment` obtained from an Analyze
 934    API call.
 935    
 936    Additional properties include:
 937    
 938    end
 939        computed time offset for convenience: `start` + `duration`
 940    container
 941        a circular reference to the containing `AudioQuantumList`,
 942        created upon creation of the `AudioQuantumList` that covers
 943        the whole track
 944    """
 945    def __init__(self, start=0, duration=0, kind=None, confidence=None, source=None) :
 946        """
 947        Initializes an `AudioQuantum`.
 948        
 949        :param start: offset from the start of the track, in seconds
 950        :param duration: length of the `AudioQuantum`
 951        :param kind: string containing what kind of rhythm unit it came from
 952        :param confidence: float between zero and one
 953        """
 954        self.start = start
 955        self.duration = duration
 956        self.kind = kind
 957        self.confidence = confidence
 958        self._source = source
 959    
 960    def get_end(self):
 961        return self.start + self.duration
 962    
 963    end = property(get_end, doc="""
 964    A computed property: the sum of `start` and `duration`.
 965    """)
 966    
 967    def get_source(self):
 968        "Returns itself or its parent."
 969        if self._source:
 970            return self._source
 971        else:
 972            source = None
 973            try:
 974                source = self.container.source
 975            except AttributeError:
 976                source = None
 977            return source
 978    
 979    def set_source(self, value):
 980        if isinstance(value, AudioData):
 981            self._source = value
 982        else:
 983            raise TypeError("Source must be an instance of echonest.audio.AudioData")
 984    
 985    source = property(get_source, set_source, doc="""
 986    The `AudioData` source for the AudioQuantum.
 987    """)
 988    
 989    def parent(self):
 990        """
 991        Returns the containing `AudioQuantum` in the rhythm hierarchy:
 992        a `tatum` returns a `beat`, a `beat` returns a `bar`, and a `bar` returns a
 993        `section`.
 994        """
 995        pars = {'tatum': 'beats',
 996                'beat':  'bars',
 997                'bar':   'sections'}
 998        try:
 999            uppers = getattr(self.container.container, pars[self.kind])
1000            return uppers.that(selection.overlap(self))[0]
1001        except LookupError:
1002            # Might not be in pars, might not have anything in parent.
1003            return None
1004    
1005    def children(self):
1006        """
1007        Returns an `AudioQuantumList` of the AudioQuanta that it contains,
1008        one step down the hierarchy. A `beat` returns `tatums`, a `bar` returns
1009        `beats`, and a `section` returns `bars`.
1010        """
1011        chils = {'beat':    'tatums',
1012                 'bar':     'beats',
1013                 'section': 'bars'}
1014        try:
1015            downers = getattr(self.container.container, chils[self.kind])
1016            return downers.that(selection.are_contained_by(self))
1017        except LookupError:
1018            return None
1019    
1020    def group(self):
1021        """
1022        Returns the `children`\() of the `AudioQuantum`\'s `parent`\(). 
1023        In other words: 'siblings'. If no parent is found, then return the
1024        `AudioQuantumList` for the whole track.
1025        """
1026        if self.parent():
1027            return self.parent().children()
1028        else:
1029            return self.container
1030    
1031    def prev(self, step=1):
1032        """
1033        Step backwards in the containing `AudioQuantumList`.
1034        Returns `self` if a boundary is reached.
1035        """
1036        group = self.container
1037        try:
1038            loc = group.index(self)
1039            new = max(loc - step, 0)
1040            return group[new]
1041        except Exception:
1042            return self
1043    
1044    def next(self, step=1):
1045        """
1046        Step forward in the containing `AudioQuantumList`.
1047        Returns `self` if a boundary is reached.
1048        """
1049        group = self.container
1050        try:
1051            loc = group.index(self)
1052            new = min(loc + step, len(group))
1053            return group[new]
1054        except Exception:
1055            return self
1056    
1057    def __str__(self):
1058        """
1059        Lists the `AudioQuantum`.kind with start and 
1060        end times, in seconds, e.g.::
1061        
1062            "segment (20.31 - 20.42)"
1063        """
1064        return "%s (%.2f - %.2f)" % (self.kind, self.start, self.end)
1065    
1066    def __repr__(self):
1067        """
1068        A string representing a constructor, including kind, start time, 
1069        duration, and (if it exists) confidence, e.g.::
1070        
1071            "AudioQuantum(kind='tatum', start=42.198267, duration=0.1523394)"
1072        """
1073        if self.confidence is not None:
1074            return "AudioQuantum(kind='%s', start=%f, duration=%f, confidence=%f)" % (self.kind, self.start, self.duration, self.confidence)
1075        else:
1076            return "AudioQuantum(kind='%s', start=%f, duration=%f)" % (self.kind, self.start, self.duration)
1077    
1078    def local_context(self):
1079        """
1080        Returns a tuple of (*index*, *length*) within rhythm siblings, where
1081        *index* is the (zero-indexed) position within its `group`\(), and 
1082        *length* is the number of siblings within its `group`\().
1083        """
1084        group = self.group()
1085        count = len(group)
1086        try:
1087            loc  = group.index(self)
1088        except Exception: # seem to be some uncontained beats
1089            loc = 0
1090        return (loc, count,)
1091    
1092    def absolute_context(self):
1093        """
1094        Returns a tuple of (*index*, *length*) within the containing 
1095        `AudioQuantumList`, where *index* is the (zero-indexed) position within 
1096        its container, and *length* is the number of siblings within the
1097        container.
1098        """
1099        group = self.container
1100        count = len(group)
1101        loc = group.index(self)
1102        return (loc, count,)
1103    
1104    def context_string(self):
1105        """
1106        Returns a one-indexed, human-readable version of context.
1107        For example::
1108            
1109            "bar 4 of 142, beat 3 of 4, tatum 2 of 3"
1110        """
1111        if self.parent() and self.kind != "bar":
1112            return "%s, %s %i of %i" % (self.parent().context_string(),
1113                                  self.kind, self.local_context()[0] + 1,
1114                                  self.local_context()[1])
1115        else:
1116            return "%s %i of %i" % (self.kind, self.absolute_context()[0] + 1,
1117                                  self.absolute_context()[1])
1118    
1119    def __getstate__(self):
1120        """
1121        Eliminates the circular reference for pickling.
1122        """
1123        dictclone = self.__dict__.copy()
1124        if 'container' in dictclone:
1125            del dictclone['container']
1126        return dictclone
1127    
1128    def toxml(self, context=None):
1129        attributedict = {'duration': str(self.duration),
1130                         'start': str(self.start)}
1131        try:
1132            if not(hasattr(context, 'source') and self.source == context.source):
1133                attributedict['source'] = self.source.analysis.identifier
1134        except Exception:
1135            pass
1136        xml = etree.Element(self.kind, attrib=attributedict)
1137        if context:
1138            return xml
1139        else:
1140            return minidom.parseString(xml).toprettyxml()
1141        
1142    
1143    def render(self, start=0.0, to_audio=None, with_source=None):
1144        if not to_audio:
1145            source = self.resolve_source(with_source)
1146            return source[self]
1147        if with_source != self.source:
1148            return
1149        to_audio.add_at(start, with_source[self])
1150        return    
1151    
1152
class AudioSegment(AudioQuantum):
    """
    Subclass of `AudioQuantum` for the data-rich segments returned by
    the Analyze API. 
    """
    def __init__(self, start=0., duration=0., pitches = None, timbre = None, 
                 loudness_begin=0., loudness_max=0., time_loudness_max=0., 
                 loudness_end=None, kind='segment', source=None):
        """
        Initializes an `AudioSegment`.
        
        :param start: offset from start of the track, in seconds
        :param duration: duration of the `AudioSegment`, in seconds
        :param pitches: a twelve-element list with relative loudnesses of each
                pitch class, from C (pitches[0]) to B (pitches[11])
        :param timbre: a twelve-element list with the loudness of each of a
                principal component of time and/or frequency profile
        :param kind: string identifying the kind of AudioQuantum: "segment"
        :param loudness_begin: loudness in dB at the start of the segment
        :param loudness_max: loudness in dB at the loudest moment of the 
                segment
        :param time_loudness_max: time (in sec from start of segment) of 
                loudest moment
        :param loudness_end: loudness at end of segment (if it is given)
        """
        self.start = start
        self.duration = duration
        self.pitches = pitches or []
        self.timbre = timbre or []
        self.loudness_begin = loudness_begin
        self.loudness_max = loudness_max
        self.time_loudness_max = time_loudness_max
        # Compare against None explicitly: 0.0 dB is a valid loudness,
        # and the old truthiness test silently dropped it.
        if loudness_end is not None:
            self.loudness_end = loudness_end
        self.kind = kind
        self.confidence = None
        self._source = source
1190
class ModifiedRenderable(AudioRenderable):
    """Class that contains any AudioRenderable, but overrides the
    render() method with nested effects, called sequentially on the
    result of the preceeding effect."""
    def __init__(self, original, effects=None):
        """
        :param original: the wrapped `AudioRenderable`
        :param effects: a list of effect objects, applied in order
        """
        # Default to a fresh list each call: the old mutable default
        # argument (effects=[]) was shared across every instance built
        # without an explicit effects list.
        if effects is None:
            effects = []
        if isinstance(original, ModifiedRenderable):
            # Flatten nested wrappers: chain onto the innermost original.
            self._original = original._original
            self._effects = original._effects + effects
        else:
            self._original = original
            self._effects = effects
    
    @property
    def duration(self):
        # Let each effect adjust the duration in turn, if it knows how.
        dur = self._original.duration
        for effect in self._effects:
            if hasattr(effect, 'duration'):
                dur = effect.duration(dur)
        return dur
    
    @property
    def source(self):
        return self._original.source
    
    @property
    def sources(self):
        return self._original.sources
    
    def render(self, start=0.0, to_audio=None, with_source=None):
        """
        Renders the wrapped quantum, then applies each effect in order.
        Works on a 32-bit copy so effect math does not clip or mutate
        the original samples.
        """
        if not to_audio:
            source = self.resolve_source(with_source)
            base = self._original.render(with_source=with_source)
            copy = AudioData32(ndarray=base.data, sampleRate=base.sampleRate, numChannels=base.numChannels, defer=False)
            for effect in self._effects:
                copy = effect.modify(copy)
            return copy
        if with_source != self.source:
            return
        base = self._original.render(with_source=with_source)
        copy = AudioData32(ndarray=base.data, shape=base.data.shape, sampleRate=base.sampleRate, numChannels=base.numChannels, defer=False)
        for effect in self._effects:
            copy = effect.modify(copy)
        to_audio.add_at(start, copy)
        return    
    
    def toxml(self, context=None):
        """
        Renders this wrapper as XML: the original quantum's element plus
        an <effects> element describing each applied effect.
        """
        outerattributedict = {'duration': str(self.duration)}
        node = etree.Element("modified_audioquantum", attrib=outerattributedict)
        
        innerattributedict = {'duration': str(self._original.duration),
                              'start': str(self._original.start)}
        try:
            if not(hasattr(context, 'source') and self.source == context.source):
                innerattributedict['source'] = self.source.analysis.identifier
        except Exception:
            pass
        orignode = etree.Element(self._original.kind, attrib=innerattributedict)
        node.append(orignode)
        fx = etree.Element('effects')
        for effect in self._effects:
            fxdict = {'id': '%s.%s' % (effect.__module__, effect.__class__.__name__)}
            # XML attribute values must be strings; effect parameters are
            # usually numbers, so convert each one explicitly (the old
            # dict.update left non-string values that broke serialization).
            for key, value in effect.__dict__.items():
                fxdict[key] = str(value)
            fx.append(etree.Element('effect', attrib=fxdict))
        node.append(fx)
        if context:
            return node
        else:
            # Serialize the Element first: minidom.parseString() takes a
            # string, not an Element object (the old code raised here).
            return minidom.parseString(etree.tostring(node)).toprettyxml()
1259        
1260
class AudioEffect(object):
    """
    Base class for effects. Calling an effect instance on a renderable
    wraps it in a `ModifiedRenderable`, so the effect's `modify` is
    applied lazily at render time.
    """
    def __call__(self, aq):
        return ModifiedRenderable(aq, [self])
1264    
class LevelDB(AudioEffect):
    """Effect that raises or lowers the level by `change` decibels."""
    def __init__(self, change):
        self.change = change
    
    def modify(self, adata):
        # Convert the dB change into a linear amplitude factor.
        factor = 10. ** (self.change / 20.)
        adata.data *= factor
        return adata
1272
class AmplitudeFactor(AudioEffect):
    """Effect that scales the samples by a linear factor `change`."""
    def __init__(self, change):
        self.change = change
    
    def modify(self, adata):
        # Scale in place and hand the same buffer back down the chain.
        adata.data *= self.change
        return adata
1280        
class TimeTruncateFactor(AudioEffect):
    """Effect that scales a renderable's length by a multiplier `factor`."""
    def __init__(self, factor):
        self.factor = factor
    
    def duration(self, old_duration):
        # The rendered duration scales linearly with the factor.
        return old_duration * self.factor
    
    def modify(self, adata):
        new_end = int(self.factor * len(adata))
        if self.factor > 1:
            # Stretching: extend the buffer with silence first.
            adata.pad_with_zeros(new_end - len(adata))
        adata.endindex = new_end
        return adata[:new_end]
    
1295
class TimeTruncateLength(AudioEffect):
    """Effect that forces a renderable to last exactly `new_duration` seconds."""
    def __init__(self, new_duration):
        self.new_duration = new_duration
    
    def duration(self, old_duration):
        # The target length is absolute, whatever the original was.
        return self.new_duration
    
    def modify(self, adata):
        target = int(self.new_duration * adata.sampleRate)
        if self.new_duration > adata.duration:
            # Lengthening: append silence up to the requested duration.
            adata.pad_with_zeros(target - len(adata))
        adata.endindex = target
        return adata[:target]
1309
1310
1311class AudioQuantumList(list, AudioRenderable):
1312    """
1313    A cont…

Large files files are truncated, but you can click here to view the full file