
/boto-2.5.2/boto/s3/bucket.py

Python | 1468 lines | 1437 code | 8 blank | 23 comment | 0 complexity | 32d5d29b8ca53f10325a9b4e6c282c1e MD5

Large files are truncated; only part of this file is shown below.

   1# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
   2# Copyright (c) 2010, Eucalyptus Systems, Inc.
   3# All rights reserved.
   4#
   5# Permission is hereby granted, free of charge, to any person obtaining a
   6# copy of this software and associated documentation files (the
   7# "Software"), to deal in the Software without restriction, including
   8# without limitation the rights to use, copy, modify, merge, publish, dis-
   9# tribute, sublicense, and/or sell copies of the Software, and to permit
  10# persons to whom the Software is furnished to do so, subject to the fol-
  11# lowing conditions:
  12#
  13# The above copyright notice and this permission notice shall be included
  14# in all copies or substantial portions of the Software.
  15#
  16# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  17# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
  18# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
  19# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
  20# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  22# IN THE SOFTWARE.
  23
  24import boto
  25from boto import handler
  26from boto.resultset import ResultSet
  27from boto.exception import BotoClientError
  28from boto.s3.acl import Policy, CannedACLStrings, Grant
  29from boto.s3.key import Key
  30from boto.s3.prefix import Prefix
  31from boto.s3.deletemarker import DeleteMarker
  32from boto.s3.multipart import MultiPartUpload
  33from boto.s3.multipart import CompleteMultiPartUpload
  34from boto.s3.multidelete import MultiDeleteResult
  35from boto.s3.multidelete import Error
  36from boto.s3.bucketlistresultset import BucketListResultSet
  37from boto.s3.bucketlistresultset import VersionedBucketListResultSet
  38from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
  39from boto.s3.lifecycle import Lifecycle
  40from boto.s3.bucketlogging import BucketLogging
  41import boto.jsonresponse
  42import boto.utils
  43import xml.sax
  44import xml.sax.saxutils
  45import StringIO
  46import urllib
  47import re
  48import base64
  49from collections import defaultdict
  50
  51# as per http://goo.gl/BDuud (02/19/2011)
  52class S3WebsiteEndpointTranslate:
  53    trans_region = defaultdict(lambda: 's3-website-us-east-1')
  54
  55    trans_region['eu-west-1'] = 's3-website-eu-west-1'
  56    trans_region['us-west-1'] = 's3-website-us-west-1'
  57    trans_region['us-west-2'] = 's3-website-us-west-2'
  58    trans_region['sa-east-1'] = 's3-website-sa-east-1'
  59    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
  60    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
  61
  62    @classmethod
  63    def translate_region(cls, reg):
  64        return cls.trans_region[reg]
  65
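# Illustrative sketch (not part of the original file): translate_region maps an
# S3 region name to the hostname prefix used for website endpoints, falling
# back to the us-east-1 endpoint for unknown regions.
#
#     S3WebsiteEndpointTranslate.translate_region('eu-west-1')
#     # -> 's3-website-eu-west-1'
#     S3WebsiteEndpointTranslate.translate_region('unknown-region')
#     # -> 's3-website-us-east-1'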
  66S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
  67
  68class Bucket(object):
  69
  70    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
  71
  72    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
  73       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  74         <Payer>%s</Payer>
  75       </RequestPaymentConfiguration>"""
  76
  77    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
  78       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  79         <Status>%s</Status>
  80         <MfaDelete>%s</MfaDelete>
  81       </VersioningConfiguration>"""
  82
  83    WebsiteBody = """<?xml version="1.0" encoding="UTF-8"?>
  84      <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  85        <IndexDocument><Suffix>%s</Suffix></IndexDocument>
  86        %s
  87      </WebsiteConfiguration>"""
  88
  89    WebsiteErrorFragment = """<ErrorDocument><Key>%s</Key></ErrorDocument>"""
  90
  91    VersionRE = '<Status>([A-Za-z]+)</Status>'
  92    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
  93
  94    def __init__(self, connection=None, name=None, key_class=Key):
  95        self.name = name
  96        self.connection = connection
  97        self.key_class = key_class
  98
  99    def __repr__(self):
 100        return '<Bucket: %s>' % self.name
 101
 102    def __iter__(self):
 103        return iter(BucketListResultSet(self))
 104
 105    def __contains__(self, key_name):
 106        return self.get_key(key_name) is not None
 107
 108    def startElement(self, name, attrs, connection):
 109        return None
 110
 111    def endElement(self, name, value, connection):
 112        if name == 'Name':
 113            self.name = value
 114        elif name == 'CreationDate':
 115            self.creation_date = value
 116        else:
 117            setattr(self, name, value)
 118
 119    def set_key_class(self, key_class):
 120        """
 121        Set the Key class associated with this bucket.  By default, this
 122        would be the boto.s3.key.Key class but if you want to subclass that
 123        for some reason this allows you to associate your new class with a
 124        bucket so that when you call bucket.new_key() or when you get a listing
  125        of keys in the bucket you will get instances of your key class
 126        rather than the default.
 127        
 128        :type key_class: class
 129        :param key_class: A subclass of Key that can be more specific
 130        """
 131        self.key_class = key_class
 132
 133    def lookup(self, key_name, headers=None):
 134        """
 135        Deprecated: Please use get_key method.
 136        
 137        :type key_name: string
 138        :param key_name: The name of the key to retrieve
 139        
 140        :rtype: :class:`boto.s3.key.Key`
 141        :returns: A Key object from this bucket.
 142        """
 143        return self.get_key(key_name, headers=headers)
 144        
 145    def get_key(self, key_name, headers=None, version_id=None, response_headers=None):
 146        """
 147        Check to see if a particular key exists within the bucket.  This
  148        method uses a HEAD request to check for the existence of the key.
 149        Returns: An instance of a Key object or None
 150        
 151        :type key_name: string
 152        :param key_name: The name of the key to retrieve
 153
 154        :type response_headers: dict
 155        :param response_headers: A dictionary containing HTTP headers/values
 156                                 that will override any headers associated with
 157                                 the stored object in the response.
 158                                 See http://goo.gl/EWOPb for details.
 159        
 160        :rtype: :class:`boto.s3.key.Key`
 161        :returns: A Key object from this bucket.
 162        """
 163        query_args = []
 164        if version_id:
 165            query_args.append('versionId=%s' % version_id)
 166        if response_headers:
 167            for rk, rv in response_headers.iteritems():
 168                query_args.append('%s=%s' % (rk, urllib.quote(rv)))
 169        if query_args:
 170            query_args = '&'.join(query_args)
 171        else:
 172            query_args = None
 173        response = self.connection.make_request('HEAD', self.name, key_name,
 174                                                headers=headers,
 175                                                query_args=query_args)
 176        response.read()
 177        # Allow any success status (2xx) - for example this lets us
 178        # support Range gets, which return status 206:
 179        if response.status/100 == 2:
 180            k = self.key_class(self)
 181            provider = self.connection.provider
 182            k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
 183            k.etag = response.getheader('etag')
 184            k.content_type = response.getheader('content-type')
 185            k.content_encoding = response.getheader('content-encoding')
 186            k.content_disposition = response.getheader('content-disposition')
 187            k.content_language = response.getheader('content-language')
 188            k.last_modified = response.getheader('last-modified')
 189            # the following machinations are a workaround to the fact that
 190            # apache/fastcgi omits the content-length header on HEAD
 191            # requests when the content-length is zero.
 192            # See http://goo.gl/0Tdax for more details.
 193            clen = response.getheader('content-length')
 194            if clen:
 195                k.size = int(response.getheader('content-length'))
 196            else:
 197                k.size = 0
 198            k.cache_control = response.getheader('cache-control')
 199            k.name = key_name
 200            k.handle_version_headers(response)
 201            k.handle_encryption_headers(response)
 202            return k
 203        else:
 204            if response.status == 404:
 205                return None
 206            else:
 207                raise self.connection.provider.storage_response_error(
 208                    response.status, response.reason, '')
 209
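    # Illustrative usage sketch (not part of the original file): because
    # get_key issues a HEAD request, it is a cheap way to test for existence
    # and read object metadata.  The bucket and key names are hypothetical.
    #
    #     import boto
    #     conn = boto.connect_s3()
    #     bucket = conn.get_bucket('mybucket')
    #     key = bucket.get_key('photos/cat.jpg')
    #     if key is None:
    #         print 'no such key'
    #     else:
    #         print key.size, key.etag, key.last_modified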
 210    def list(self, prefix='', delimiter='', marker='', headers=None):
 211        """
  212        List key objects within a bucket.  This returns an instance of a
 213        BucketListResultSet that automatically handles all of the result
 214        paging, etc. from S3.  You just need to keep iterating until
 215        there are no more results.
 216        
 217        Called with no arguments, this will return an iterator object across
 218        all keys within the bucket.
 219
 220        The Key objects returned by the iterator are obtained by parsing
 221        the results of a GET on the bucket, also known as the List Objects
 222        request.  The XML returned by this request contains only a subset
 223        of the information about each key.  Certain metadata fields such
 224        as Content-Type and user metadata are not available in the XML.
 225        Therefore, if you want these additional metadata fields you will
 226        have to do a HEAD request on the Key in the bucket.
 227        
 228        :type prefix: string
 229        :param prefix: allows you to limit the listing to a particular
 230                        prefix.  For example, if you call the method with
 231                        prefix='/foo/' then the iterator will only cycle
 232                        through the keys that begin with the string '/foo/'.
 233                        
 234        :type delimiter: string
 235        :param delimiter: can be used in conjunction with the prefix
 236                        to allow you to organize and browse your keys
 237                        hierarchically. See:
 238                        http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
 239                        for more details.
 240                        
 241        :type marker: string
 242        :param marker: The "marker" of where you are in the result set
 243        
 244        :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
 245        :return: an instance of a BucketListResultSet that handles paging, etc
 246        """
 247        return BucketListResultSet(self, prefix, delimiter, marker, headers)
 248
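    # Illustrative sketch (not part of the original file): iterating a listing
    # with a prefix and delimiter.  The prefix 'photos/' is hypothetical; with
    # a delimiter, rolled-up "subdirectories" come back as Prefix objects.
    #
    #     for item in bucket.list(prefix='photos/', delimiter='/'):
    #         print item.name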
 249    def list_versions(self, prefix='', delimiter='', key_marker='',
 250                      version_id_marker='', headers=None):
 251        """
  252        List version objects within a bucket.  This returns an instance of a
 253        VersionedBucketListResultSet that automatically handles all of the result
 254        paging, etc. from S3.  You just need to keep iterating until
 255        there are no more results.
 256        Called with no arguments, this will return an iterator object across
 257        all keys within the bucket.
 258        
 259        :type prefix: string
 260        :param prefix: allows you to limit the listing to a particular
 261                        prefix.  For example, if you call the method with
 262                        prefix='/foo/' then the iterator will only cycle
 263                        through the keys that begin with the string '/foo/'.
 264                        
 265        :type delimiter: string
 266        :param delimiter: can be used in conjunction with the prefix
 267                        to allow you to organize and browse your keys
 268                        hierarchically. See:
 269                        http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
 270                        for more details.
 271                        
  272        :type key_marker: string
  273        :param key_marker: The "marker" of where you are in the result set
  274        
  275        :rtype: :class:`boto.s3.bucketlistresultset.VersionedBucketListResultSet`
  276        :return: an instance of a VersionedBucketListResultSet that handles paging, etc
 277        """
 278        return VersionedBucketListResultSet(self, prefix, delimiter, key_marker,
 279                                            version_id_marker, headers)
 280
 281    def list_multipart_uploads(self, key_marker='',
 282                               upload_id_marker='',
 283                               headers=None):
 284        """
 285        List multipart upload objects within a bucket.  This returns an
  286        instance of a MultiPartUploadListResultSet that automatically
 287        handles all of the result paging, etc. from S3.  You just need
 288        to keep iterating until there are no more results.
 289        
  290        :type key_marker: string
  291        :param key_marker: The "marker" of where you are in the result set
  292        
  293        :rtype: :class:`boto.s3.bucketlistresultset.MultiPartUploadListResultSet`
  294        :return: an instance of a MultiPartUploadListResultSet that handles paging, etc
 295        """
 296        return MultiPartUploadListResultSet(self, key_marker,
 297                                            upload_id_marker,
 298                                            headers)
 299
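    # Illustrative sketch (not part of the original file): walking the active
    # multipart uploads.  The key_name and id attributes and the
    # cancel_upload() call on MultiPartUpload are usage assumptions here.
    #
    #     for upload in bucket.list_multipart_uploads():
    #         print upload.key_name, upload.id
    #         # upload.cancel_upload()  # abort a stale upload if desired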
 300    def _get_all(self, element_map, initial_query_string='',
 301                 headers=None, **params):
 302        l = []
 303        for k, v in params.items():
 304            k = k.replace('_', '-')
  305            if k == 'maxkeys':
 306                k = 'max-keys'
 307            if isinstance(v, unicode):
 308                v = v.encode('utf-8')
 309            if v is not None and v != '':
 310                l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
 311        if len(l):
 312            s = initial_query_string + '&' + '&'.join(l)
 313        else:
 314            s = initial_query_string
 315        response = self.connection.make_request('GET', self.name,
 316                                                headers=headers,
 317                                                query_args=s)
 318        body = response.read()
 319        boto.log.debug(body)
 320        if response.status == 200:
 321            rs = ResultSet(element_map)
 322            h = handler.XmlHandler(rs, self)
 323            xml.sax.parseString(body, h)
 324            return rs
 325        else:
 326            raise self.connection.provider.storage_response_error(
 327                response.status, response.reason, body)
 328
 329    def get_all_keys(self, headers=None, **params):
 330        """
 331        A lower-level method for listing contents of a bucket.
 332        This closely models the actual S3 API and requires you to manually
 333        handle the paging of results.  For a higher-level method
 334        that handles the details of paging for you, you can use the list method.
 335        
 336        :type max_keys: int
 337        :param max_keys: The maximum number of keys to retrieve
 338        
 339        :type prefix: string
 340        :param prefix: The prefix of the keys you want to retrieve
 341        
 342        :type marker: string
 343        :param marker: The "marker" of where you are in the result set
 344        
 345        :type delimiter: string 
 346        :param delimiter: If this optional, Unicode string parameter
 347                          is included with your request, then keys that
 348                          contain the same string between the prefix and
 349                          the first occurrence of the delimiter will be
 350                          rolled up into a single result element in the
 351                          CommonPrefixes collection. These rolled-up keys
 352                          are not returned elsewhere in the response.
 353
 354        :rtype: ResultSet
 355        :return: The result from S3 listing the keys requested
 356        
 357        """
 358        return self._get_all([('Contents', self.key_class),
 359                              ('CommonPrefixes', Prefix)],
 360                             '', headers, **params)
 361
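    # Illustrative sketch (not part of the original file): manual paging with
    # get_all_keys.  It assumes the returned ResultSet exposes is_truncated,
    # which the higher-level list() iterator also relies on.
    #
    #     rs = bucket.get_all_keys(max_keys=200)
    #     while True:
    #         for key in rs:
    #             print key.name
    #         if not rs.is_truncated:
    #             break
    #         rs = bucket.get_all_keys(max_keys=200, marker=rs[-1].name)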
 362    def get_all_versions(self, headers=None, **params):
 363        """
 364        A lower-level, version-aware method for listing contents of a bucket.
 365        This closely models the actual S3 API and requires you to manually
 366        handle the paging of results.  For a higher-level method
 367        that handles the details of paging for you, you can use the list method.
 368        
 369        :type max_keys: int
 370        :param max_keys: The maximum number of keys to retrieve
 371        
 372        :type prefix: string
 373        :param prefix: The prefix of the keys you want to retrieve
 374        
 375        :type key_marker: string
 376        :param key_marker: The "marker" of where you are in the result set
 377                           with respect to keys.
 378        
 379        :type version_id_marker: string
 380        :param version_id_marker: The "marker" of where you are in the result
 381                                  set with respect to version-id's.
 382        
 383        :type delimiter: string 
 384        :param delimiter: If this optional, Unicode string parameter
 385                          is included with your request, then keys that
 386                          contain the same string between the prefix and
 387                          the first occurrence of the delimiter will be
 388                          rolled up into a single result element in the
 389                          CommonPrefixes collection. These rolled-up keys
 390                          are not returned elsewhere in the response.
 391
 392        :rtype: ResultSet
 393        :return: The result from S3 listing the keys requested
 394        
 395        """
 396        return self._get_all([('Version', self.key_class),
 397                              ('CommonPrefixes', Prefix),
 398                              ('DeleteMarker', DeleteMarker)],
 399                             'versions', headers, **params)
 400
 401    def get_all_multipart_uploads(self, headers=None, **params):
 402        """
  403        A lower-level method for listing active
 404        MultiPart uploads for a bucket.  This closely models the
 405        actual S3 API and requires you to manually handle the paging
 406        of results.  For a higher-level method that handles the
 407        details of paging for you, you can use the list method.
 408        
 409        :type max_uploads: int
 410        :param max_uploads: The maximum number of uploads to retrieve.
 411                            Default value is 1000.
 412        
 413        :type key_marker: string
 414        :param key_marker: Together with upload_id_marker, this parameter
 415                           specifies the multipart upload after which listing
 416                           should begin.  If upload_id_marker is not specified,
 417                           only the keys lexicographically greater than the
 418                           specified key_marker will be included in the list.
 419
 420                           If upload_id_marker is specified, any multipart
 421                           uploads for a key equal to the key_marker might
 422                           also be included, provided those multipart uploads
 423                           have upload IDs lexicographically greater than the
 424                           specified upload_id_marker.
 425        
 426        :type upload_id_marker: string
 427        :param upload_id_marker: Together with key-marker, specifies
 428                                 the multipart upload after which listing
 429                                 should begin. If key_marker is not specified,
 430                                 the upload_id_marker parameter is ignored.
 431                                 Otherwise, any multipart uploads for a key
 432                                 equal to the key_marker might be included
 433                                 in the list only if they have an upload ID
 434                                 lexicographically greater than the specified
 435                                 upload_id_marker.
 436
 437        
 438        :rtype: ResultSet
 439        :return: The result from S3 listing the uploads requested
 440        
 441        """
 442        return self._get_all([('Upload', MultiPartUpload),
 443                              ('CommonPrefixes', Prefix)],
 444                             'uploads', headers, **params)
 445
 446    def new_key(self, key_name=None):
 447        """
 448        Creates a new key
 449        
 450        :type key_name: string
 451        :param key_name: The name of the key to create
 452        
 453        :rtype: :class:`boto.s3.key.Key` or subclass
 454        :returns: An instance of the newly created key object
 455        """
 456        return self.key_class(self, key_name)
 457
 458    def generate_url(self, expires_in, method='GET', headers=None,
 459                     force_http=False, response_headers=None,
 460                     expires_in_absolute=False):
 461        return self.connection.generate_url(expires_in, method, self.name,
 462                                            headers=headers,
 463                                            force_http=force_http,
 464                                            response_headers=response_headers,
 465                                            expires_in_absolute=expires_in_absolute)
 466
 467    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
 468        """
 469        Deletes a set of keys using S3's Multi-object delete API. If a
  470        VersionID is specified for a key then only that version is removed.
 471        Returns a MultiDeleteResult Object, which contains Deleted
 472        and Error elements for each key you ask to delete.
 473        
 474        :type keys: list
 475        :param keys: A list of either key_names or (key_name, versionid) pairs
 476                     or a list of Key instances.
 477
 478        :type quiet: boolean
 479        :param quiet: In quiet mode the response includes only keys where
 480                      the delete operation encountered an error. For a
 481                      successful deletion, the operation does not return
 482                      any information about the delete in the response body.
 483
 484        :type mfa_token: tuple or list of strings
 485        :param mfa_token: A tuple or list consisting of the serial number
 486                          from the MFA device and the current value of
 487                          the six-digit token associated with the device.
 488                          This value is required anytime you are
 489                          deleting versioned objects from a bucket
 490                          that has the MFADelete option on the bucket.
 491
 492        :returns: An instance of MultiDeleteResult
 493        """
 494        ikeys = iter(keys)
 495        result = MultiDeleteResult(self)
 496        provider = self.connection.provider
 497        query_args = 'delete'
 498        def delete_keys2(hdrs):
 499            hdrs = hdrs or {}
 500            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
 501            data += u"<Delete>"
 502            if quiet:
 503                data += u"<Quiet>true</Quiet>"
 504            count = 0
 505            while count < 1000:
 506                try:
 507                    key = ikeys.next()
 508                except StopIteration:
 509                    break
 510                if isinstance(key, basestring):
 511                    key_name = key
 512                    version_id = None
 513                elif isinstance(key, tuple) and len(key) == 2:
 514                    key_name, version_id = key
 515                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
 516                    key_name = key.name
 517                    version_id = key.version_id
 518                else:
 519                    if isinstance(key, Prefix):
 520                        key_name = key.name
 521                        code = 'PrefixSkipped'   # Don't delete Prefix
 522                    else:
  523                        key_name = repr(key)     # try to get a string
 524                        code = 'InvalidArgument' # other unknown type
 525                    message = 'Invalid. No delete action taken for this object.'
 526                    error = Error(key_name, code=code, message=message)
 527                    result.errors.append(error)
 528                    continue
 529                count += 1
 530                #key_name = key_name.decode('utf-8')
 531                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
 532                if version_id:
 533                    data += u"<VersionId>%s</VersionId>" % version_id
 534                data += u"</Object>"
 535            data += u"</Delete>"
 536            if count <= 0:
 537                return False # no more
 538            data = data.encode('utf-8')
 539            fp = StringIO.StringIO(data)
 540            md5 = boto.utils.compute_md5(fp)
 541            hdrs['Content-MD5'] = md5[1]
 542            hdrs['Content-Type'] = 'text/xml'
 543            if mfa_token:
 544                hdrs[provider.mfa_header] = ' '.join(mfa_token)
 545            response = self.connection.make_request('POST', self.name,
 546                                                    headers=hdrs,
 547                                                    query_args=query_args,
 548                                                    data=data)
 549            body = response.read()
 550            if response.status == 200:
 551                h = handler.XmlHandler(result, self)
 552                xml.sax.parseString(body, h)
 553                return count >= 1000 # more?
 554            else:
 555                raise provider.storage_response_error(response.status,
 556                                                      response.reason,
 557                                                      body)
 558        while delete_keys2(headers):
 559            pass
 560        return result
 561
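    # Illustrative sketch (not part of the original file): bulk delete with a
    # mix of plain key names and (key_name, version_id) pairs.  The .deleted
    # and .errors attributes on MultiDeleteResult, and the names used, are
    # assumptions for the example.
    #
    #     result = bucket.delete_keys(['logs/old.log',
    #                                  ('report.csv', 'some-version-id')])
    #     for d in result.deleted:
    #         print 'deleted', d.key
    #     for e in result.errors:
    #         print 'failed', e.key, e.code, e.message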
 562    def delete_key(self, key_name, headers=None,
 563                   version_id=None, mfa_token=None):
 564        """
 565        Deletes a key from the bucket.  If a version_id is provided,
 566        only that version of the key will be deleted.
 567        
 568        :type key_name: string
 569        :param key_name: The key name to delete
 570
 571        :type version_id: string
 572        :param version_id: The version ID (optional)
 573        
 574        :type mfa_token: tuple or list of strings
 575        :param mfa_token: A tuple or list consisting of the serial number
 576                          from the MFA device and the current value of
 577                          the six-digit token associated with the device.
 578                          This value is required anytime you are
 579                          deleting versioned objects from a bucket
 580                          that has the MFADelete option on the bucket.
 581
 582        :rtype: :class:`boto.s3.key.Key` or subclass
 583        :returns: A key object holding information on what was deleted.
  584                  The caller can see if a delete_marker was created or
 585                  removed and what version_id the delete created or removed.
 586        """
 587        provider = self.connection.provider
 588        if version_id:
 589            query_args = 'versionId=%s' % version_id
 590        else:
 591            query_args = None
 592        if mfa_token:
 593            if not headers:
 594                headers = {}
 595            headers[provider.mfa_header] = ' '.join(mfa_token)
 596        response = self.connection.make_request('DELETE', self.name, key_name,
 597                                                headers=headers,
 598                                                query_args=query_args)
 599        body = response.read()
 600        if response.status != 204:
 601            raise provider.storage_response_error(response.status,
 602                                                  response.reason, body)
 603        else:
 604            # return a key object with information on what was deleted.
 605            k = self.key_class(self)
 606            k.name = key_name
 607            k.handle_version_headers(response)
 608            return k
 609
 610    def copy_key(self, new_key_name, src_bucket_name,
 611                 src_key_name, metadata=None, src_version_id=None,
 612                 storage_class='STANDARD', preserve_acl=False,
 613                 encrypt_key=False, headers=None, query_args=None):
 614        """
 615        Create a new key in the bucket by copying another existing key.
 616
 617        :type new_key_name: string
 618        :param new_key_name: The name of the new key
 619
 620        :type src_bucket_name: string
 621        :param src_bucket_name: The name of the source bucket
 622
 623        :type src_key_name: string
 624        :param src_key_name: The name of the source key
 625
 626        :type src_version_id: string
 627        :param src_version_id: The version id for the key.  This param
 628                               is optional.  If not specified, the newest
 629                               version of the key will be copied.
 630
 631        :type metadata: dict
 632        :param metadata: Metadata to be associated with new key.
 633                         If metadata is supplied, it will replace the
 634                         metadata of the source key being copied.
 635                         If no metadata is supplied, the source key's
 636                         metadata will be copied to the new key.
 637
 638        :type storage_class: string
 639        :param storage_class: The storage class of the new key.
 640                              By default, the new key will use the
 641                              standard storage class.  Possible values are:
 642                              STANDARD | REDUCED_REDUNDANCY
 643
 644        :type preserve_acl: bool
 645        :param preserve_acl: If True, the ACL from the source key
 646                             will be copied to the destination
 647                             key.  If False, the destination key
 648                             will have the default ACL.
 649                             Note that preserving the ACL in the
 650                             new key object will require two
 651                             additional API calls to S3, one to
 652                             retrieve the current ACL and one to
 653                             set that ACL on the new object.  If
 654                             you don't care about the ACL, a value
 655                             of False will be significantly more
 656                             efficient.
 657
 658        :type encrypt_key: bool
 659        :param encrypt_key: If True, the new copy of the object will
 660                            be encrypted on the server-side by S3 and
 661                            will be stored in an encrypted form while
 662                            at rest in S3.
 663
 664        :type headers: dict
 665        :param headers: A dictionary of header name/value pairs.
 666
 667        :type query_args: string
 668        :param query_args: A string of additional querystring arguments
 669                           to append to the request
 670
 671        :rtype: :class:`boto.s3.key.Key` or subclass
 672        :returns: An instance of the newly created key object
 673        """
 674        headers = headers or {}
 675        provider = self.connection.provider
 676        src_key_name = boto.utils.get_utf8_value(src_key_name)
 677        if preserve_acl:
 678            if self.name == src_bucket_name:
 679                src_bucket = self
 680            else:
 681                src_bucket = self.connection.get_bucket(src_bucket_name)
 682            acl = src_bucket.get_xml_acl(src_key_name)
 683        if encrypt_key:
 684            headers[provider.server_side_encryption_header] = 'AES256'
 685        src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
 686        if src_version_id:
 687            src += '?versionId=%s' % src_version_id
 688        headers[provider.copy_source_header] = str(src)
 689        # make sure storage_class_header key exists before accessing it
 690        if provider.storage_class_header and storage_class:
 691            headers[provider.storage_class_header] = storage_class
 692        if metadata:
 693            headers[provider.metadata_directive_header] = 'REPLACE'
 694            headers = boto.utils.merge_meta(headers, metadata, provider)
 695        elif not query_args: # Can't use this header with multi-part copy.
 696            headers[provider.metadata_directive_header] = 'COPY'
 697        response = self.connection.make_request('PUT', self.name, new_key_name,
 698                                                headers=headers,
 699                                                query_args=query_args)
 700        body = response.read()
 701        if response.status == 200:
 702            key = self.new_key(new_key_name)
 703            h = handler.XmlHandler(key, self)
 704            xml.sax.parseString(body, h)
 705            if hasattr(key, 'Error'):
 706                raise provider.storage_copy_error(key.Code, key.Message, body)
 707            key.handle_version_headers(response)
 708            if preserve_acl:
 709                self.set_xml_acl(acl, new_key_name)
 710            return key
 711        else:
 712            raise provider.storage_response_error(response.status,
 713                                                  response.reason, body)
 714
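    # Illustrative sketch (not part of the original file): a server-side copy
    # that replaces the user metadata on the new key and preserves the source
    # object's ACL.  Bucket and key names are hypothetical.
    #
    #     new_key = bucket.copy_key('backup/report.csv',
    #                               'source-bucket', 'report.csv',
    #                               metadata={'project': 'alpha'},
    #                               preserve_acl=True)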
 715    def set_canned_acl(self, acl_str, key_name='', headers=None,
 716                       version_id=None):
 717        assert acl_str in CannedACLStrings
 718
 719        if headers:
 720            headers[self.connection.provider.acl_header] = acl_str
 721        else:
 722            headers={self.connection.provider.acl_header: acl_str}
 723
 724        query_args = 'acl'
 725        if version_id:
 726            query_args += '&versionId=%s' % version_id
 727        response = self.connection.make_request('PUT', self.name, key_name,
 728                headers=headers, query_args=query_args)
 729        body = response.read()
 730        if response.status != 200:
 731            raise self.connection.provider.storage_response_error(
 732                response.status, response.reason, body)
 733
 734    def get_xml_acl(self, key_name='', headers=None, version_id=None):
 735        query_args = 'acl'
 736        if version_id:
 737            query_args += '&versionId=%s' % version_id
 738        response = self.connection.make_request('GET', self.name, key_name,
 739                                                query_args=query_args,
 740                                                headers=headers)
 741        body = response.read()
 742        if response.status != 200:
 743            raise self.connection.provider.storage_response_error(
 744                response.status, response.reason, body)
 745        return body
 746
 747    def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
 748                    query_args='acl'):
 749        if version_id:
 750            query_args += '&versionId=%s' % version_id
 751        response = self.connection.make_request('PUT', self.name, key_name,
 752                                                data=acl_str.encode('UTF-8'),
 753                                                query_args=query_args,
 754                                                headers=headers)
 755        body = response.read()
 756        if response.status != 200:
 757            raise self.connection.provider.storage_response_error(
 758                response.status, response.reason, body)
 759
 760    def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
 761        if isinstance(acl_or_str, Policy):
 762            self.set_xml_acl(acl_or_str.to_xml(), key_name,
 763                             headers, version_id)
 764        else:
 765            self.set_canned_acl(acl_or_str, key_name,
 766                                headers, version_id)
 767
 768    def get_acl(self, key_name='', headers=None, version_id=None):
 769        query_args = 'acl'
 770        if version_id:
 771            query_args += '&versionId=%s' % version_id
 772        response = self.connection.make_request('GET', self.name, key_name,
 773                                                query_args=query_args,
 774                                                headers=headers)
 775        body = response.read()
 776        if response.status == 200:
 777            policy = Policy(self)
 778            h = handler.XmlHandler(policy, self)
 779            xml.sax.parseString(body, h)
 780            return policy
 781        else:
 782            raise self.connection.provider.storage_response_error(
 783                response.status, response.reason, body)
 784
  785    def set_subresource(self, subresource, value, key_name='', headers=None,
 786                        version_id=None):
 787        """
 788        Set a subresource for a bucket or key.
 789
 790        :type subresource: string
 791        :param subresource: The subresource to set.
 792
 793        :type value: string
 794        :param value: The value of the subresource.
 795
 796        :type key_name: string
 797        :param key_name: The key to operate on, or None to operate on the
 798                         bucket.
 799
 800        :type headers: dict
 801        :param headers: Additional HTTP headers to include in the request.
 802
  803        :type version_id: string
  804        :param version_id: Optional. The version id of the key to operate
 805                               on. If not specified, operate on the newest
 806                               version.
 807        """
 808        if not subresource:
 809            raise TypeError('set_subresource called with subresource=None')
 810        query_args = subresource
 811        if version_id:
 812            query_args += '&versionId=%s' % version_id
 813        response = self.connection.make_request('PUT', self.name, key_name,
 814                                                data=value.encode('UTF-8'),
 815                                                query_args=query_args,
 816                                                headers=headers)
 817        body = response.read()
 818        if response.status != 200:
 819            raise self.connection.provider.storage_response_error(
 820                response.status, response.reason, body)
 821
 822    def get_subresource(self, subresource, key_name='', headers=None,
 823                        version_id=None):
 824        """
 825        Get a subresource for a bucket or key.
 826
 827        :type subresource: string
 828        :param subresource: The subresource to get.
 829
 830        :type key_name: string
 831        :param key_name: The key to operate on, or None to operate on the
 832                         bucket.
 833
 834        :type headers: dict
 835        :param headers: Additional HTTP headers to include in the request.
 836
  837        :type version_id: string
  838        :param version_id: Optional. The version id of the key to operate
 839                               on. If not specified, operate on the newest
 840                               version.
 841
 842        :rtype: string
 843        :returns: The value of the subresource.
 844        """
 845        if not subresource:
 846            raise TypeError('get_subresource called with subresource=None')
 847        query_args = subresource
 848        if version_id:
 849            query_args += '&versionId=%s' % version_id
 850        response = self.connection.make_request('GET', self.name, key_name,
 851                                                query_args=query_args,
 852                                                headers=headers)
 853        body = response.read()
 854        if response.status != 200:
 855            raise self.connection.provider.storage_response_error(
 856                response.status, response.reason, body)
 857        return body
 858
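    # Illustrative sketch (not part of the original file): subresources map to
    # query-string arguments, so for example the bucket's versioning
    # configuration can be fetched as raw XML.
    #
    #     versioning_xml = bucket.get_subresource('versioning')
    #     print versioning_xml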
 859    def make_public(self, recursive=False, headers=None):
 860        self.set_canned_acl('public-read', headers=headers)
 861        if recursive:
 862            for key in self:
 863                self.set_canned_acl('public-read', key.name, headers=headers)
 864
 865    def add_email_grant(self, permission, email_address,
 866                        recursive=False, headers=None):
 867        """
 868        Convenience method that provides a quick way to add an email grant
 869        to a bucket. This method retrieves the current ACL, creates a new
 870        grant based on the parameters passed in, adds that grant to the ACL
 871        and then PUT's the new ACL back to S3.
 872        
 873        :type permission: string
 874        :param permission: The permission being granted. Should be one of:
 875                           (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
 876        
 877        :type email_address: string
 878        :param email_address: The email address associated with the AWS
  879                              account you are granting the permission to.
 880        
 881        :type recursive: boolean
  882        :param recursive: A boolean value that controls whether the command
 883                          will apply the grant to all keys within the bucket
 884                          or not.  The default value is False.  By passing a
 885                          True value, the call will iterate through all keys
 886                          in the bucket and apply the same grant to each key.
 887                          CAUTION: If you have a lot of keys, this could take
 888                          a long time!
 889        """
 890        if permission not in S3Permissions:
 891            raise self.connection.provider.storage_permissions_error(
 892                'Unknown Permission: %s' % permission)
 893        policy = self.get_acl(headers=headers)
 894        policy.acl.add_email_grant(permission, email_address)
 895        self.set_acl(policy, headers=headers)
 896        if recursive:
 897            for key in self:
 898                key.add_email_grant(permission, email_address, headers=headers)
 899
 900    def add_user_grant(self, permission, user_id, recursive=False,
 901                       headers=None, display_name=None):
 902        """
 903        Convenience method that provides a quick way to add a canonical
 904        user grant to a bucket.  This method retrieves the current ACL,
 905        creates a new grant based on the parameters passed in, adds that
 906        grant to the ACL and then PUT's the new ACL back to S3.
 907        
 908        :type permission: string
 909        :param permission: The permission being granted. Should be one of:
 910                           (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
 911        
 912        :type user_id: string
 913        :param user_id:     The canonical user id associated with the AWS
  914                            account you are granting the permission to.
 915                            
 916        :type recursive: boolean
  917        :param recursive: A boolean value that controls whether the command
 918                          will apply the grant to all keys within the bucket
 919                          or not.  The default value is False.  By passing a
 920                          True value, the call will iterate through all keys
 921                          in the bucket and apply the same grant to each key.
 922                          CAUTION: If you have a lot of keys, this could take
 923                          a long time!
 924                          
 925        :type display_name: string
  926        :param display_name: An optional string containing the user's
 927                             Display Name.  Only required on Walrus.
 928        """
 929        if permission not in S3Permissions:
 930            raise self.connection.provider.storage_permissions_error(
 931                'Unknown Permission: %s' % permission)
 932        policy = self.get_acl(headers=headers)
 933        policy.acl.add_user_grant(permission, user_id,
 934                                  display_name=display_name)
 935        self.set_acl(policy, headers=headers)
 936        if recursive:
 937            for key in self:
 938                key.add_user_grant(permission, user_id, headers=headers,
 939                                   display_name=display_name)
 940
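    # Illustrative sketch (not part of the original file): granting a
    # canonical user READ access on the bucket and, recursively, on every
    # existing key.  The canonical user id is a placeholder.
    #
    #     bucket.add_user_grant('READ', 'canonical-user-id-goes-here',
    #                           recursive=True)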
 941    def list_grants(self, headers=None):
 942        policy = self.get_acl(headers=headers)
 943        return policy.acl.grants
 944
 945    def get_location(self):
 946        """
 947        Returns the LocationConstraint for the bucket.
 948
 949        :rtype: str
 950        :return: The LocationConstraint for the bucket or the empty
 951                 string if no constraint was specified when bucket
 952                 was created.
 953        """
 954        response = self.connection.make_request('GET', self.name,
 955                                                query_args='location')
 956        body = response.read()
 957        if response.status == 200:
 958            rs = ResultSet(self)
 959            h = handler.XmlHandler(rs, self)
 960            xml.sax.parseString(body, h)
 961            return rs.LocationConstraint
 962        else:
 963            raise self.connection.provider.storage_response_error(
 964                response.status, response.reason, body)
 965
 966    def set_xml_logging(self, logging_str, headers=None):
 967        """
 968        Set logging on a bucket directly to the given xml string.
 969
 970        :type logging_str: unicode string
 971        :param logging_str: The XML for the bucketloggingstatus which will be set.
 972                            The string will be converted to utf-8 before it is sent.
 973                            Usually, you will obtain this XML from the BucketLogging
 974                            object.
 975
 976        :rtype: bool
 977        :return: True if ok or raises an exception.
 978        """
 979        body = logging_str.encode('utf-8')
 980        response = self.connection.make_request('PUT', self.name, data=body,
 981                query_args='logging', headers=headers)
 982        body = response.read()
 983        if response.status == 200:
 984            return True
 985        else:
 986            raise self.connection.provider.storage_response_error(
 987                response.status, response.reason, body)
 988        
 989    def enable_logging(self, target_bucket, target_prefix='', grants=None, headers=None):
 990        """
 991        Enable logging on a bucket.
 992
 993        :type target_bucket: bucket or string
 994        :param target_bucket: The bucket to log to.
 995
 996        :type target_prefix: string
 997        :param target_prefix: The prefix which should be prepended to the 
 998                              generated log files written to the target_bucket.
 999
1000        :type grants: list of Grant objects
1001        :param grants: A list of extra permissions which will be granted on
1002                       the log files which are created.
1003
1004        :rtype: bool
1005        :return: True if ok or raises an exception.
1006        """
1007        if isinstance(target_bucket, Bucket):
1008            target_bucket = target_bucket.name
1009        blogging = BucketLogging(target=target_bucket, prefix=target_prefix, grants=grants)
1010        return self.set_xml_logging(blogging.to_xml(), headers=headers)
1011 
1012    def disable_logging(self, headers=None):
1013        """
1014        Disable logging on a bucket.
1015
1016        :rtype: bool
1017        :return: True if ok or raises an exception.
1018        """
1019        blogging = BucketLogging()
1020        return self.set_xml_logging(blogging.to_xml(), headers=headers)
1021
1022    def get_logging_status(self, headers=None):
1023        """
1024        Get the logging status for this bucket.
1025
1026        :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
1027        :return: A BucketLogging object for this bucket.
1028        """
1029        response = self.connection.make_request('GET', self.name,
1030                query_args='logging', headers=headers)
1031        body = response.read()
1032        if response.status == 200:
1033            blogging = BucketLogging()
1034            h = handler.XmlHandler(blogging, self)
1035            xml.sax.parseString(body, h)
1036            return blogging
1037        else:
1038            raise self.connection.provider.storage_response_error(
1039                response.status, response.reason, body)
1040
1041    def set_as_logging_target(self, headers=None):
1042        """
 1043        Set up the current bucket as a logging target by granting the necessary
1044        permissions to the LogDelivery group to write log files to this bucket.
1045        """
1046        policy = self.get_acl(headers=headers)
1047        g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
1048        g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
1049        policy.acl.add_grant(g1)
1050        policy.acl.add_grant(g2)
1051        self.set_acl(policy, headers=headers)
1052
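    # Illustrative sketch (not part of the original file): turning on server
    # access logging.  The target bucket is first made a valid log delivery
    # target; bucket names are hypothetical.
    #
    #     log_bucket = conn.get_bucket('my-log-bucket')
    #     log_bucket.set_as_logging_target()
    #     bucket.enable_logging(log_bucket, target_prefix='access-logs/')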
1053    def get_request_payment(self, headers=None):
1054        response = self.connection.make_request('GET', self.name,
1055                query_args='requestPayment', headers=headers)
1056        body = response.read()
1057        if response.status == 200:
1058            return body
1059        else:
1060            raise self.connection.provider.storage_response_error(
1061                response.status, response.reason, body)
1062
1063    def set_request_payment(self, payer='BucketOwner', headers=None):
1064        body = self.BucketPaymentBody % payer
1065        response = self.connection.make_request('PUT', self.name, data=body,
1066                query_args='requestPayment', headers=headers)
1067        body = response.read()
1068        if response.status == 200:
1069            return True
1070        else:
1071            raise self.connection.provider.storage_response_error(
1072                response.status, response.reason, body)
1073        
1074    def configure_versioning(self, versioning, mfa_delete=False,
1075                             mfa_token=None, headers=None):
1076        """
1077        Configure versioning for this bucket.
1078        
 1079        .. note:: This feature is currently in beta.
1080                 
1081        :type versioning: bool
 1082        :param versioning: A boolean indicating whether versioning is
1083                           enabled (True) or disabled (False).
1084
1085        :type mfa_delete: bool
1086        :param mfa_delete: A boolean indicating whether the Multi-Factor
1087                           Authentication Delete feature is enabled (True)
1088                           or disabled (False).  If mfa_delete is enabled
1089                           then all Delete operations will require the
1090                           token from your MFA device to be passed in
1091                           the request.
1092
1093        :type mfa_token: tuple or list of strings
1094        :param mfa_token: A tuple or list consisting of the serial number
1095                          from the MFA device and the current value of
1096                          the six-digit token associated with the device.
1097                          This value is required when you are changing
1098                          the status of the MfaDelete property of
1099                          the bucket.
1100        """
1101        if versioning:
1102            ver = 'Enabled'
1103        else:
1104            ver = 'Suspended'
1105        if mfa_delete:
1106            mfa = 'Enabled'
1107        else:
1108            mfa = 'Disabled'
1109        body = self.VersioningBody % (ver, mfa)
1110        if mfa_token:
1111            if not headers:
1112                headers = {}
1113            provider = self.connection.provider
1114            headers[provider.mfa_header] = ' '.join(mfa_token)
1115        response = self.connection.make_request('PUT', self.name, data=body,
1116                query_args='versioning', headers=headers)
1117        body = response.read()
1118        if response.status == 200:
1119            return True
1120        else:
1121            raise self.connection.provider.storage_response_error(
1122                response.sta
