/boto-2.5.2/boto/s3/bucket.py
Python | 1468 lines | 1437 code | 8 blank | 23 comment | 0 complexity | 32d5d29b8ca53f10325a9b4e6c282c1e MD5 | raw file
Large files are truncated, but you can click here to view the full file
- # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
- # Copyright (c) 2010, Eucalyptus Systems, Inc.
- # All rights reserved.
- #
- # Permission is hereby granted, free of charge, to any person obtaining a
- # copy of this software and associated documentation files (the
- # "Software"), to deal in the Software without restriction, including
- # without limitation the rights to use, copy, modify, merge, publish, dis-
- # tribute, sublicense, and/or sell copies of the Software, and to permit
- # persons to whom the Software is furnished to do so, subject to the fol-
- # lowing conditions:
- #
- # The above copyright notice and this permission notice shall be included
- # in all copies or substantial portions of the Software.
- #
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
- # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- # IN THE SOFTWARE.
- import boto
- from boto import handler
- from boto.resultset import ResultSet
- from boto.exception import BotoClientError
- from boto.s3.acl import Policy, CannedACLStrings, Grant
- from boto.s3.key import Key
- from boto.s3.prefix import Prefix
- from boto.s3.deletemarker import DeleteMarker
- from boto.s3.multipart import MultiPartUpload
- from boto.s3.multipart import CompleteMultiPartUpload
- from boto.s3.multidelete import MultiDeleteResult
- from boto.s3.multidelete import Error
- from boto.s3.bucketlistresultset import BucketListResultSet
- from boto.s3.bucketlistresultset import VersionedBucketListResultSet
- from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
- from boto.s3.lifecycle import Lifecycle
- from boto.s3.bucketlogging import BucketLogging
- import boto.jsonresponse
- import boto.utils
- import xml.sax
- import xml.sax.saxutils
- import StringIO
- import urllib
- import re
- import base64
- from collections import defaultdict
# as per http://goo.gl/BDuud (02/19/2011)
class S3WebsiteEndpointTranslate:
    """Map an S3 region name to its static-website hosting endpoint prefix.

    Unknown regions fall back to the classic endpoint,
    's3-website-us-east-1'.
    """

    # defaultdict so lookups of unlisted regions return the us-east-1
    # endpoint instead of raising KeyError.
    trans_region = defaultdict(lambda: 's3-website-us-east-1')
    trans_region['eu-west-1'] = 's3-website-eu-west-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['us-west-2'] = 's3-website-us-west-2'
    trans_region['sa-east-1'] = 's3-website-sa-east-1'
    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'

    @classmethod
    def translate_region(cls, reg):
        """Return the website endpoint prefix for region *reg*.

        Fix: the original declared this classmethod's first parameter as
        ``self``, which is misleading -- it receives the class, so it is
        named ``cls`` here.  Behavior is unchanged.
        """
        return cls.trans_region[reg]
# Permission names S3 accepts in grants; used to validate the
# add_email_grant / add_user_grant convenience methods below.
S3Permissions = [
    'READ',
    'WRITE',
    'READ_ACP',
    'WRITE_ACP',
    'FULL_CONTROL',
]
class Bucket(object):
    """Represents a single S3 bucket.

    Provides key listing/creation/deletion, ACL manipulation and
    bucket-level configuration (payment, versioning, website, logging).
    Only the class-level constants appear here; the methods follow below.
    """

    # Canonical URI of the S3 log-delivery group, granted READ_ACP/WRITE
    # when enabling bucket logging.
    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    # XML payload template for PUT ?requestPayment; %s -> payer name.
    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Payer>%s</Payer>
</RequestPaymentConfiguration>"""

    # XML payload template for PUT ?versioning; %s -> status ("Enabled"/
    # "Suspended"), %s -> MFA-delete flag.
    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Status>%s</Status>
  <MfaDelete>%s</MfaDelete>
</VersioningConfiguration>"""

    # XML payload template for PUT ?website; %s -> index document suffix,
    # %s -> optional WebsiteErrorFragment (or empty string).
    WebsiteBody = """<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <IndexDocument><Suffix>%s</Suffix></IndexDocument>
  %s
</WebsiteConfiguration>"""

    # Optional error-document fragment for WebsiteBody; %s -> error key.
    WebsiteErrorFragment = """<ErrorDocument><Key>%s</Key></ErrorDocument>"""

    # Regexes for pulling status fields out of a GET ?versioning response.
    VersionRE = '<Status>([A-Za-z]+)</Status>'
    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
- def __init__(self, connection=None, name=None, key_class=Key):
- self.name = name
- self.connection = connection
- self.key_class = key_class
- def __repr__(self):
- return '<Bucket: %s>' % self.name
    def __iter__(self):
        """Iterate over all keys in the bucket via a paging result set."""
        return iter(BucketListResultSet(self))
- def __contains__(self, key_name):
- return not (self.get_key(key_name) is None)
    def startElement(self, name, attrs, connection):
        # SAX parsing hook: a Bucket element has no nested objects to
        # delegate to, so always return None.
        return None
- def endElement(self, name, value, connection):
- if name == 'Name':
- self.name = value
- elif name == 'CreationDate':
- self.creation_date = value
- else:
- setattr(self, name, value)
- def set_key_class(self, key_class):
- """
- Set the Key class associated with this bucket. By default, this
- would be the boto.s3.key.Key class but if you want to subclass that
- for some reason this allows you to associate your new class with a
- bucket so that when you call bucket.new_key() or when you get a listing
- of keys in the bucket you will get an instances of your key class
- rather than the default.
-
- :type key_class: class
- :param key_class: A subclass of Key that can be more specific
- """
- self.key_class = key_class
- def lookup(self, key_name, headers=None):
- """
- Deprecated: Please use get_key method.
-
- :type key_name: string
- :param key_name: The name of the key to retrieve
-
- :rtype: :class:`boto.s3.key.Key`
- :returns: A Key object from this bucket.
- """
- return self.get_key(key_name, headers=headers)
-
    def get_key(self, key_name, headers=None, version_id=None, response_headers=None):
        """
        Check to see if a particular key exists within the bucket.  This
        method uses a HEAD request to check for the existence of the key.
        Returns: An instance of a Key object or None

        :type key_name: string
        :param key_name: The name of the key to retrieve

        :type headers: dict
        :param headers: Additional HTTP headers to send with the request.

        :type version_id: string
        :param version_id: Optional version id; HEADs that specific version.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
                                 that will override any headers associated with
                                 the stored object in the response.
                                 See http://goo.gl/EWOPb for details.

        :rtype: :class:`boto.s3.key.Key`
        :returns: A Key object from this bucket, or None when the key does
                  not exist (HTTP 404).  Any other error status raises.
        """
        query_args = []
        if version_id:
            query_args.append('versionId=%s' % version_id)
        if response_headers:
            # response-header overrides travel as query parameters
            for rk, rv in response_headers.iteritems():
                query_args.append('%s=%s' % (rk, urllib.quote(rv)))
        if query_args:
            query_args = '&'.join(query_args)
        else:
            query_args = None
        response = self.connection.make_request('HEAD', self.name, key_name,
                                                headers=headers,
                                                query_args=query_args)
        # drain the (empty) HEAD body so the connection can be reused
        response.read()
        # Allow any success status (2xx) - for example this lets us
        # support Range gets, which return status 206:
        if response.status/100 == 2:
            k = self.key_class(self)
            provider = self.connection.provider
            # populate the Key from the response headers
            k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
            k.etag = response.getheader('etag')
            k.content_type = response.getheader('content-type')
            k.content_encoding = response.getheader('content-encoding')
            k.content_disposition = response.getheader('content-disposition')
            k.content_language = response.getheader('content-language')
            k.last_modified = response.getheader('last-modified')
            # the following machinations are a workaround to the fact that
            # apache/fastcgi omits the content-length header on HEAD
            # requests when the content-length is zero.
            # See http://goo.gl/0Tdax for more details.
            clen = response.getheader('content-length')
            if clen:
                k.size = int(response.getheader('content-length'))
            else:
                k.size = 0
            k.cache_control = response.getheader('cache-control')
            k.name = key_name
            k.handle_version_headers(response)
            k.handle_encryption_headers(response)
            return k
        else:
            if response.status == 404:
                return None
            else:
                raise self.connection.provider.storage_response_error(
                    response.status, response.reason, '')
- def list(self, prefix='', delimiter='', marker='', headers=None):
- """
- List key objects within a bucket. This returns an instance of an
- BucketListResultSet that automatically handles all of the result
- paging, etc. from S3. You just need to keep iterating until
- there are no more results.
-
- Called with no arguments, this will return an iterator object across
- all keys within the bucket.
- The Key objects returned by the iterator are obtained by parsing
- the results of a GET on the bucket, also known as the List Objects
- request. The XML returned by this request contains only a subset
- of the information about each key. Certain metadata fields such
- as Content-Type and user metadata are not available in the XML.
- Therefore, if you want these additional metadata fields you will
- have to do a HEAD request on the Key in the bucket.
-
- :type prefix: string
- :param prefix: allows you to limit the listing to a particular
- prefix. For example, if you call the method with
- prefix='/foo/' then the iterator will only cycle
- through the keys that begin with the string '/foo/'.
-
- :type delimiter: string
- :param delimiter: can be used in conjunction with the prefix
- to allow you to organize and browse your keys
- hierarchically. See:
- http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
- for more details.
-
- :type marker: string
- :param marker: The "marker" of where you are in the result set
-
- :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
- :return: an instance of a BucketListResultSet that handles paging, etc
- """
- return BucketListResultSet(self, prefix, delimiter, marker, headers)
- def list_versions(self, prefix='', delimiter='', key_marker='',
- version_id_marker='', headers=None):
- """
- List version objects within a bucket. This returns an instance of an
- VersionedBucketListResultSet that automatically handles all of the result
- paging, etc. from S3. You just need to keep iterating until
- there are no more results.
- Called with no arguments, this will return an iterator object across
- all keys within the bucket.
-
- :type prefix: string
- :param prefix: allows you to limit the listing to a particular
- prefix. For example, if you call the method with
- prefix='/foo/' then the iterator will only cycle
- through the keys that begin with the string '/foo/'.
-
- :type delimiter: string
- :param delimiter: can be used in conjunction with the prefix
- to allow you to organize and browse your keys
- hierarchically. See:
- http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
- for more details.
-
- :type marker: string
- :param marker: The "marker" of where you are in the result set
-
- :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
- :return: an instance of a BucketListResultSet that handles paging, etc
- """
- return VersionedBucketListResultSet(self, prefix, delimiter, key_marker,
- version_id_marker, headers)
- def list_multipart_uploads(self, key_marker='',
- upload_id_marker='',
- headers=None):
- """
- List multipart upload objects within a bucket. This returns an
- instance of an MultiPartUploadListResultSet that automatically
- handles all of the result paging, etc. from S3. You just need
- to keep iterating until there are no more results.
-
- :type marker: string
- :param marker: The "marker" of where you are in the result set
-
- :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
- :return: an instance of a BucketListResultSet that handles paging, etc
- """
- return MultiPartUploadListResultSet(self, key_marker,
- upload_id_marker,
- headers)
    def _get_all(self, element_map, initial_query_string='',
                 headers=None, **params):
        # Shared GET-and-parse helper behind the get_all_* listing methods.
        # Builds a query string from **params (underscores become dashes
        # and 'maxkeys' is special-cased to 'max-keys'), issues a GET on
        # the bucket, and parses the XML response into a ResultSet whose
        # element_map decides which class represents each element name.
        l = []
        for k, v in params.items():
            k = k.replace('_', '-')
            if k == 'maxkeys':
                k = 'max-keys'
            # encode unicode values before URL-quoting (Python 2)
            if isinstance(v, unicode):
                v = v.encode('utf-8')
            # skip empty/None values entirely rather than sending k=
            if v is not None and v != '':
                l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
        if len(l):
            s = initial_query_string + '&' + '&'.join(l)
        else:
            s = initial_query_string
        response = self.connection.make_request('GET', self.name,
                                                headers=headers,
                                                query_args=s)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            rs = ResultSet(element_map)
            h = handler.XmlHandler(rs, self)
            xml.sax.parseString(body, h)
            return rs
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
- def get_all_keys(self, headers=None, **params):
- """
- A lower-level method for listing contents of a bucket.
- This closely models the actual S3 API and requires you to manually
- handle the paging of results. For a higher-level method
- that handles the details of paging for you, you can use the list method.
-
- :type max_keys: int
- :param max_keys: The maximum number of keys to retrieve
-
- :type prefix: string
- :param prefix: The prefix of the keys you want to retrieve
-
- :type marker: string
- :param marker: The "marker" of where you are in the result set
-
- :type delimiter: string
- :param delimiter: If this optional, Unicode string parameter
- is included with your request, then keys that
- contain the same string between the prefix and
- the first occurrence of the delimiter will be
- rolled up into a single result element in the
- CommonPrefixes collection. These rolled-up keys
- are not returned elsewhere in the response.
- :rtype: ResultSet
- :return: The result from S3 listing the keys requested
-
- """
- return self._get_all([('Contents', self.key_class),
- ('CommonPrefixes', Prefix)],
- '', headers, **params)
- def get_all_versions(self, headers=None, **params):
- """
- A lower-level, version-aware method for listing contents of a bucket.
- This closely models the actual S3 API and requires you to manually
- handle the paging of results. For a higher-level method
- that handles the details of paging for you, you can use the list method.
-
- :type max_keys: int
- :param max_keys: The maximum number of keys to retrieve
-
- :type prefix: string
- :param prefix: The prefix of the keys you want to retrieve
-
- :type key_marker: string
- :param key_marker: The "marker" of where you are in the result set
- with respect to keys.
-
- :type version_id_marker: string
- :param version_id_marker: The "marker" of where you are in the result
- set with respect to version-id's.
-
- :type delimiter: string
- :param delimiter: If this optional, Unicode string parameter
- is included with your request, then keys that
- contain the same string between the prefix and
- the first occurrence of the delimiter will be
- rolled up into a single result element in the
- CommonPrefixes collection. These rolled-up keys
- are not returned elsewhere in the response.
- :rtype: ResultSet
- :return: The result from S3 listing the keys requested
-
- """
- return self._get_all([('Version', self.key_class),
- ('CommonPrefixes', Prefix),
- ('DeleteMarker', DeleteMarker)],
- 'versions', headers, **params)
- def get_all_multipart_uploads(self, headers=None, **params):
- """
- A lower-level, version-aware method for listing active
- MultiPart uploads for a bucket. This closely models the
- actual S3 API and requires you to manually handle the paging
- of results. For a higher-level method that handles the
- details of paging for you, you can use the list method.
-
- :type max_uploads: int
- :param max_uploads: The maximum number of uploads to retrieve.
- Default value is 1000.
-
- :type key_marker: string
- :param key_marker: Together with upload_id_marker, this parameter
- specifies the multipart upload after which listing
- should begin. If upload_id_marker is not specified,
- only the keys lexicographically greater than the
- specified key_marker will be included in the list.
- If upload_id_marker is specified, any multipart
- uploads for a key equal to the key_marker might
- also be included, provided those multipart uploads
- have upload IDs lexicographically greater than the
- specified upload_id_marker.
-
- :type upload_id_marker: string
- :param upload_id_marker: Together with key-marker, specifies
- the multipart upload after which listing
- should begin. If key_marker is not specified,
- the upload_id_marker parameter is ignored.
- Otherwise, any multipart uploads for a key
- equal to the key_marker might be included
- in the list only if they have an upload ID
- lexicographically greater than the specified
- upload_id_marker.
-
- :rtype: ResultSet
- :return: The result from S3 listing the uploads requested
-
- """
- return self._get_all([('Upload', MultiPartUpload),
- ('CommonPrefixes', Prefix)],
- 'uploads', headers, **params)
- def new_key(self, key_name=None):
- """
- Creates a new key
-
- :type key_name: string
- :param key_name: The name of the key to create
-
- :rtype: :class:`boto.s3.key.Key` or subclass
- :returns: An instance of the newly created key object
- """
- return self.key_class(self, key_name)
    def generate_url(self, expires_in, method='GET', headers=None,
                     force_http=False, response_headers=None,
                     expires_in_absolute=False):
        # Delegate to the connection's signed-URL generator for this
        # bucket (no key -- the URL addresses the bucket itself).
        return self.connection.generate_url(expires_in, method, self.name,
                                            headers=headers,
                                            force_http=force_http,
                                            response_headers=response_headers,
                                            expires_in_absolute=expires_in_absolute)
    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
        """
        Deletes a set of keys using S3's Multi-object delete API.  If a
        VersionID is specified for that key then that version is removed.
        Returns a MultiDeleteResult Object, which contains Deleted
        and Error elements for each key you ask to delete.

        :type keys: list
        :param keys: A list of either key_names or (key_name, versionid)
                     pairs, or a list of Key instances.

        :type quiet: boolean
        :param quiet: In quiet mode the response includes only keys where
                      the delete operation encountered an error.  For a
                      successful deletion, the operation does not return
                      any information about the delete in the response
                      body.

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial number
                          from the MFA device and the current value of
                          the six-digit token associated with the device.
                          This value is required anytime you are deleting
                          versioned objects from a bucket that has the
                          MFADelete option on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to send with each POST.

        :returns: An instance of MultiDeleteResult
        """
        ikeys = iter(keys)
        result = MultiDeleteResult(self)
        provider = self.connection.provider
        query_args = 'delete'

        def delete_keys2(hdrs):
            # Build and POST one ?delete request covering up to 1000 keys
            # (the S3 per-request limit).  Returns True when the iterator
            # may still hold more keys, False once it is exhausted.
            hdrs = hdrs or {}
            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
            data += u"<Delete>"
            if quiet:
                data += u"<Quiet>true</Quiet>"
            count = 0
            while count < 1000:
                try:
                    key = ikeys.next()
                except StopIteration:
                    break
                # accept plain names, (name, version) pairs, or
                # Key/DeleteMarker instances
                if isinstance(key, basestring):
                    key_name = key
                    version_id = None
                elif isinstance(key, tuple) and len(key) == 2:
                    key_name, version_id = key
                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
                    key_name = key.name
                    version_id = key.version_id
                else:
                    # unsupported entry: record an Error locally instead
                    # of sending it to S3
                    if isinstance(key, Prefix):
                        key_name = key.name
                        code = 'PrefixSkipped'   # Don't delete Prefix
                    else:
                        key_name = repr(key)   # try get a string
                        code = 'InvalidArgument'  # other unknown type
                    message = 'Invalid. No delete action taken for this object.'
                    error = Error(key_name, code=code, message=message)
                    result.errors.append(error)
                    continue
                count += 1
                #key_name = key_name.decode('utf-8')
                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
                if version_id:
                    data += u"<VersionId>%s</VersionId>" % version_id
                data += u"</Object>"
            data += u"</Delete>"
            if count <= 0:
                return False  # no more
            data = data.encode('utf-8')
            # S3 requires a Content-MD5 header on multi-object deletes
            fp = StringIO.StringIO(data)
            md5 = boto.utils.compute_md5(fp)
            hdrs['Content-MD5'] = md5[1]
            hdrs['Content-Type'] = 'text/xml'
            if mfa_token:
                hdrs[provider.mfa_header] = ' '.join(mfa_token)
            response = self.connection.make_request('POST', self.name,
                                                    headers=hdrs,
                                                    query_args=query_args,
                                                    data=data)
            body = response.read()
            if response.status == 200:
                # merge Deleted/Error elements into the shared result
                h = handler.XmlHandler(result, self)
                xml.sax.parseString(body, h)
                return count >= 1000  # more?
            else:
                raise provider.storage_response_error(response.status,
                                                      response.reason,
                                                      body)
        # issue batches of <=1000 until the key iterator is drained
        while delete_keys2(headers):
            pass
        return result
- def delete_key(self, key_name, headers=None,
- version_id=None, mfa_token=None):
- """
- Deletes a key from the bucket. If a version_id is provided,
- only that version of the key will be deleted.
-
- :type key_name: string
- :param key_name: The key name to delete
- :type version_id: string
- :param version_id: The version ID (optional)
-
- :type mfa_token: tuple or list of strings
- :param mfa_token: A tuple or list consisting of the serial number
- from the MFA device and the current value of
- the six-digit token associated with the device.
- This value is required anytime you are
- deleting versioned objects from a bucket
- that has the MFADelete option on the bucket.
- :rtype: :class:`boto.s3.key.Key` or subclass
- :returns: A key object holding information on what was deleted.
- The Caller can see if a delete_marker was created or
- removed and what version_id the delete created or removed.
- """
- provider = self.connection.provider
- if version_id:
- query_args = 'versionId=%s' % version_id
- else:
- query_args = None
- if mfa_token:
- if not headers:
- headers = {}
- headers[provider.mfa_header] = ' '.join(mfa_token)
- response = self.connection.make_request('DELETE', self.name, key_name,
- headers=headers,
- query_args=query_args)
- body = response.read()
- if response.status != 204:
- raise provider.storage_response_error(response.status,
- response.reason, body)
- else:
- # return a key object with information on what was deleted.
- k = self.key_class(self)
- k.name = key_name
- k.handle_version_headers(response)
- return k
    def copy_key(self, new_key_name, src_bucket_name,
                 src_key_name, metadata=None, src_version_id=None,
                 storage_class='STANDARD', preserve_acl=False,
                 encrypt_key=False, headers=None, query_args=None):
        """
        Create a new key in the bucket by copying another existing key.

        :type new_key_name: string
        :param new_key_name: The name of the new key

        :type src_bucket_name: string
        :param src_bucket_name: The name of the source bucket

        :type src_key_name: string
        :param src_key_name: The name of the source key

        :type src_version_id: string
        :param src_version_id: The version id for the key.  This param
                               is optional.  If not specified, the newest
                               version of the key will be copied.

        :type metadata: dict
        :param metadata: Metadata to be associated with new key.  If
                         metadata is supplied, it will replace the
                         metadata of the source key being copied.  If no
                         metadata is supplied, the source key's metadata
                         will be copied to the new key.

        :type storage_class: string
        :param storage_class: The storage class of the new key.  By
                              default, the new key will use the standard
                              storage class.  Possible values are:
                              STANDARD | REDUCED_REDUNDANCY

        :type preserve_acl: bool
        :param preserve_acl: If True, the ACL from the source key will be
                             copied to the destination key.  If False,
                             the destination key will have the default
                             ACL.  Note that preserving the ACL in the
                             new key object will require two additional
                             API calls to S3, one to retrieve the current
                             ACL and one to set that ACL on the new
                             object.  If you don't care about the ACL, a
                             value of False will be significantly more
                             efficient.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will be
                            encrypted on the server-side by S3 and will
                            be stored in an encrypted form while at rest
                            in S3.

        :type headers: dict
        :param headers: A dictionary of header name/value pairs.

        :type query_args: string
        :param query_args: A string of additional querystring arguments
                           to append to the request

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: An instance of the newly created key object
        """
        headers = headers or {}
        provider = self.connection.provider
        src_key_name = boto.utils.get_utf8_value(src_key_name)
        if preserve_acl:
            if self.name == src_bucket_name:
                src_bucket = self
            else:
                src_bucket = self.connection.get_bucket(src_bucket_name)
            # fetch the source ACL up front so a failure here happens
            # before the copy request is issued
            acl = src_bucket.get_xml_acl(src_key_name)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
        if src_version_id:
            src += '?versionId=%s' % src_version_id
        headers[provider.copy_source_header] = str(src)
        # make sure storage_class_header key exists before accessing it
        if provider.storage_class_header and storage_class:
            headers[provider.storage_class_header] = storage_class
        if metadata:
            headers[provider.metadata_directive_header] = 'REPLACE'
            headers = boto.utils.merge_meta(headers, metadata, provider)
        elif not query_args:  # Can't use this header with multi-part copy.
            headers[provider.metadata_directive_header] = 'COPY'
        response = self.connection.make_request('PUT', self.name, new_key_name,
                                                headers=headers,
                                                query_args=query_args)
        body = response.read()
        if response.status == 200:
            key = self.new_key(new_key_name)
            h = handler.XmlHandler(key, self)
            xml.sax.parseString(body, h)
            # S3 can return HTTP 200 with an error document in the body
            if hasattr(key, 'Error'):
                raise provider.storage_copy_error(key.Code, key.Message, body)
            key.handle_version_headers(response)
            if preserve_acl:
                self.set_xml_acl(acl, new_key_name)
            return key
        else:
            raise provider.storage_response_error(response.status,
                                                  response.reason, body)
- def set_canned_acl(self, acl_str, key_name='', headers=None,
- version_id=None):
- assert acl_str in CannedACLStrings
- if headers:
- headers[self.connection.provider.acl_header] = acl_str
- else:
- headers={self.connection.provider.acl_header: acl_str}
- query_args = 'acl'
- if version_id:
- query_args += '&versionId=%s' % version_id
- response = self.connection.make_request('PUT', self.name, key_name,
- headers=headers, query_args=query_args)
- body = response.read()
- if response.status != 200:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def get_xml_acl(self, key_name='', headers=None, version_id=None):
- query_args = 'acl'
- if version_id:
- query_args += '&versionId=%s' % version_id
- response = self.connection.make_request('GET', self.name, key_name,
- query_args=query_args,
- headers=headers)
- body = response.read()
- if response.status != 200:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- return body
- def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
- query_args='acl'):
- if version_id:
- query_args += '&versionId=%s' % version_id
- response = self.connection.make_request('PUT', self.name, key_name,
- data=acl_str.encode('UTF-8'),
- query_args=query_args,
- headers=headers)
- body = response.read()
- if response.status != 200:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
- if isinstance(acl_or_str, Policy):
- self.set_xml_acl(acl_or_str.to_xml(), key_name,
- headers, version_id)
- else:
- self.set_canned_acl(acl_or_str, key_name,
- headers, version_id)
- def get_acl(self, key_name='', headers=None, version_id=None):
- query_args = 'acl'
- if version_id:
- query_args += '&versionId=%s' % version_id
- response = self.connection.make_request('GET', self.name, key_name,
- query_args=query_args,
- headers=headers)
- body = response.read()
- if response.status == 200:
- policy = Policy(self)
- h = handler.XmlHandler(policy, self)
- xml.sax.parseString(body, h)
- return policy
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def set_subresource(self, subresource, value, key_name = '', headers=None,
- version_id=None):
- """
- Set a subresource for a bucket or key.
- :type subresource: string
- :param subresource: The subresource to set.
- :type value: string
- :param value: The value of the subresource.
- :type key_name: string
- :param key_name: The key to operate on, or None to operate on the
- bucket.
- :type headers: dict
- :param headers: Additional HTTP headers to include in the request.
- :type src_version_id: string
- :param src_version_id: Optional. The version id of the key to operate
- on. If not specified, operate on the newest
- version.
- """
- if not subresource:
- raise TypeError('set_subresource called with subresource=None')
- query_args = subresource
- if version_id:
- query_args += '&versionId=%s' % version_id
- response = self.connection.make_request('PUT', self.name, key_name,
- data=value.encode('UTF-8'),
- query_args=query_args,
- headers=headers)
- body = response.read()
- if response.status != 200:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def get_subresource(self, subresource, key_name='', headers=None,
- version_id=None):
- """
- Get a subresource for a bucket or key.
- :type subresource: string
- :param subresource: The subresource to get.
- :type key_name: string
- :param key_name: The key to operate on, or None to operate on the
- bucket.
- :type headers: dict
- :param headers: Additional HTTP headers to include in the request.
- :type src_version_id: string
- :param src_version_id: Optional. The version id of the key to operate
- on. If not specified, operate on the newest
- version.
- :rtype: string
- :returns: The value of the subresource.
- """
- if not subresource:
- raise TypeError('get_subresource called with subresource=None')
- query_args = subresource
- if version_id:
- query_args += '&versionId=%s' % version_id
- response = self.connection.make_request('GET', self.name, key_name,
- query_args=query_args,
- headers=headers)
- body = response.read()
- if response.status != 200:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- return body
- def make_public(self, recursive=False, headers=None):
- self.set_canned_acl('public-read', headers=headers)
- if recursive:
- for key in self:
- self.set_canned_acl('public-read', key.name, headers=headers)
- def add_email_grant(self, permission, email_address,
- recursive=False, headers=None):
- """
- Convenience method that provides a quick way to add an email grant
- to a bucket. This method retrieves the current ACL, creates a new
- grant based on the parameters passed in, adds that grant to the ACL
- and then PUT's the new ACL back to S3.
-
- :type permission: string
- :param permission: The permission being granted. Should be one of:
- (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
-
- :type email_address: string
- :param email_address: The email address associated with the AWS
- account your are granting the permission to.
-
- :type recursive: boolean
- :param recursive: A boolean value to controls whether the command
- will apply the grant to all keys within the bucket
- or not. The default value is False. By passing a
- True value, the call will iterate through all keys
- in the bucket and apply the same grant to each key.
- CAUTION: If you have a lot of keys, this could take
- a long time!
- """
- if permission not in S3Permissions:
- raise self.connection.provider.storage_permissions_error(
- 'Unknown Permission: %s' % permission)
- policy = self.get_acl(headers=headers)
- policy.acl.add_email_grant(permission, email_address)
- self.set_acl(policy, headers=headers)
- if recursive:
- for key in self:
- key.add_email_grant(permission, email_address, headers=headers)
- def add_user_grant(self, permission, user_id, recursive=False,
- headers=None, display_name=None):
- """
- Convenience method that provides a quick way to add a canonical
- user grant to a bucket. This method retrieves the current ACL,
- creates a new grant based on the parameters passed in, adds that
- grant to the ACL and then PUT's the new ACL back to S3.
-
- :type permission: string
- :param permission: The permission being granted. Should be one of:
- (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
-
- :type user_id: string
- :param user_id: The canonical user id associated with the AWS
- account your are granting the permission to.
-
- :type recursive: boolean
- :param recursive: A boolean value to controls whether the command
- will apply the grant to all keys within the bucket
- or not. The default value is False. By passing a
- True value, the call will iterate through all keys
- in the bucket and apply the same grant to each key.
- CAUTION: If you have a lot of keys, this could take
- a long time!
-
- :type display_name: string
- :param display_name: An option string containing the user's
- Display Name. Only required on Walrus.
- """
- if permission not in S3Permissions:
- raise self.connection.provider.storage_permissions_error(
- 'Unknown Permission: %s' % permission)
- policy = self.get_acl(headers=headers)
- policy.acl.add_user_grant(permission, user_id,
- display_name=display_name)
- self.set_acl(policy, headers=headers)
- if recursive:
- for key in self:
- key.add_user_grant(permission, user_id, headers=headers,
- display_name=display_name)
- def list_grants(self, headers=None):
- policy = self.get_acl(headers=headers)
- return policy.acl.grants
- def get_location(self):
- """
- Returns the LocationConstraint for the bucket.
- :rtype: str
- :return: The LocationConstraint for the bucket or the empty
- string if no constraint was specified when bucket
- was created.
- """
- response = self.connection.make_request('GET', self.name,
- query_args='location')
- body = response.read()
- if response.status == 200:
- rs = ResultSet(self)
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs.LocationConstraint
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def set_xml_logging(self, logging_str, headers=None):
- """
- Set logging on a bucket directly to the given xml string.
- :type logging_str: unicode string
- :param logging_str: The XML for the bucketloggingstatus which will be set.
- The string will be converted to utf-8 before it is sent.
- Usually, you will obtain this XML from the BucketLogging
- object.
- :rtype: bool
- :return: True if ok or raises an exception.
- """
- body = logging_str.encode('utf-8')
- response = self.connection.make_request('PUT', self.name, data=body,
- query_args='logging', headers=headers)
- body = response.read()
- if response.status == 200:
- return True
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
-
- def enable_logging(self, target_bucket, target_prefix='', grants=None, headers=None):
- """
- Enable logging on a bucket.
- :type target_bucket: bucket or string
- :param target_bucket: The bucket to log to.
- :type target_prefix: string
- :param target_prefix: The prefix which should be prepended to the
- generated log files written to the target_bucket.
- :type grants: list of Grant objects
- :param grants: A list of extra permissions which will be granted on
- the log files which are created.
- :rtype: bool
- :return: True if ok or raises an exception.
- """
- if isinstance(target_bucket, Bucket):
- target_bucket = target_bucket.name
- blogging = BucketLogging(target=target_bucket, prefix=target_prefix, grants=grants)
- return self.set_xml_logging(blogging.to_xml(), headers=headers)
-
- def disable_logging(self, headers=None):
- """
- Disable logging on a bucket.
- :rtype: bool
- :return: True if ok or raises an exception.
- """
- blogging = BucketLogging()
- return self.set_xml_logging(blogging.to_xml(), headers=headers)
- def get_logging_status(self, headers=None):
- """
- Get the logging status for this bucket.
- :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
- :return: A BucketLogging object for this bucket.
- """
- response = self.connection.make_request('GET', self.name,
- query_args='logging', headers=headers)
- body = response.read()
- if response.status == 200:
- blogging = BucketLogging()
- h = handler.XmlHandler(blogging, self)
- xml.sax.parseString(body, h)
- return blogging
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def set_as_logging_target(self, headers=None):
- """
- Setup the current bucket as a logging target by granting the necessary
- permissions to the LogDelivery group to write log files to this bucket.
- """
- policy = self.get_acl(headers=headers)
- g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
- g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
- policy.acl.add_grant(g1)
- policy.acl.add_grant(g2)
- self.set_acl(policy, headers=headers)
- def get_request_payment(self, headers=None):
- response = self.connection.make_request('GET', self.name,
- query_args='requestPayment', headers=headers)
- body = response.read()
- if response.status == 200:
- return body
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
- def set_request_payment(self, payer='BucketOwner', headers=None):
- body = self.BucketPaymentBody % payer
- response = self.connection.make_request('PUT', self.name, data=body,
- query_args='requestPayment', headers=headers)
- body = response.read()
- if response.status == 200:
- return True
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
-
- def configure_versioning(self, versioning, mfa_delete=False,
- mfa_token=None, headers=None):
- """
- Configure versioning for this bucket.
-
- ..note:: This feature is currently in beta.
-
- :type versioning: bool
- :param versioning: A boolean indicating whether version is
- enabled (True) or disabled (False).
- :type mfa_delete: bool
- :param mfa_delete: A boolean indicating whether the Multi-Factor
- Authentication Delete feature is enabled (True)
- or disabled (False). If mfa_delete is enabled
- then all Delete operations will require the
- token from your MFA device to be passed in
- the request.
- :type mfa_token: tuple or list of strings
- :param mfa_token: A tuple or list consisting of the serial number
- from the MFA device and the current value of
- the six-digit token associated with the device.
- This value is required when you are changing
- the status of the MfaDelete property of
- the bucket.
- """
- if versioning:
- ver = 'Enabled'
- else:
- ver = 'Suspended'
- if mfa_delete:
- mfa = 'Enabled'
- else:
- mfa = 'Disabled'
- body = self.VersioningBody % (ver, mfa)
- if mfa_token:
- if not headers:
- headers = {}
- provider = self.connection.provider
- headers[provider.mfa_header] = ' '.join(mfa_token)
- response = self.connection.make_request('PUT', self.name, data=body,
- query_args='versioning', headers=headers)
- body = response.read()
- if response.status == 200:
- return True
- else:
- raise self.connection.provider.storage_response_error(
- response.sta…
Large files files are truncated, but you can click here to view the full file