Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[SVCS-826] S3 Compatibility #351

Open
wants to merge 6 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
agent==0.1.2
aiohttp==0.18.2
git+https://github.com/felliott/boto.git@feature/gen-url-query-params-6#egg=boto
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to remove old boto

boto3==1.7.36
celery==3.1.17
furl==0.4.2
google-auth==1.4.1
Expand Down
18 changes: 10 additions & 8 deletions waterbutler/core/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,15 +105,17 @@ async def send_signed_request(method, url, payload):
))


def normalize_datetime(date):
    """Normalize *date* to a UTC ISO-8601 string with whole-second precision.

    Accepts either a parseable date string or a ``datetime`` object
    (boto3 hands back ``datetime`` instances directly, so the argument is
    not always a string — hence the generic name ``date``).

    :param date: a ``datetime``, a date string, or ``None``
    :return: ISO-8601 UTC string (microseconds dropped), or ``None`` if
        *date* is ``None``
    """
    from datetime import timezone  # stdlib; equivalent to pytz.UTC here

    if date is None:
        return None
    if isinstance(date, str):
        date = dateutil.parser.parse(date)
    if not date.tzinfo:
        # Naive datetimes are assumed to already be in UTC.
        date = date.replace(tzinfo=timezone.utc)
    date = date.astimezone(tz=timezone.utc)
    date = date.replace(microsecond=0)
    return date.isoformat()



class ZipStreamGenerator:
Expand Down
26 changes: 16 additions & 10 deletions waterbutler/providers/s3/metadata.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os

from waterbutler.core import metadata
from waterbutler.core import utils


class S3Metadata(metadata.BaseMetadata):
Expand All @@ -18,42 +19,47 @@ class S3FileMetadataHeaders(S3Metadata, metadata.BaseFileMetadata):

def __init__(self, path, headers):
self._path = path
self.obj = headers
self._etag = None
# Cast to dict to clone as the headers will
# be destroyed when the request leaves scope
super().__init__(dict(headers))
super().__init__(headers)

@property
def path(self):
return '/' + self._path

@property
def size(self):
return self.raw['CONTENT-LENGTH']
return self.obj.content_length

@property
def content_type(self):
return self.raw['CONTENT-TYPE']
return self.obj.content_type

@property
def modified(self):
return self.raw['LAST-MODIFIED']
return utils.normalize_datetime(self.obj.last_modified)

@property
def created_utc(self):
return None

@property
def etag(self):
return self.raw['ETAG'].replace('"', '')
if self._etag:
return self._etag
else:
self._etag = self.obj.e_tag.replace('"', '')
return self._etag

@property
def extra(self):
md5 = self.raw['ETAG'].replace('"', '')
return {
'md5': md5,
'encryption': self.raw.get('X-AMZ-SERVER-SIDE-ENCRYPTION', ''),
'md5': self.etag,
'encryption': self.obj.server_side_encryption,
'hashes': {
'md5': md5,
'md5': self.etag,
},
}

Expand All @@ -70,7 +76,7 @@ def size(self):

@property
def modified(self):
return self.raw['LastModified']
return self.raw['LastModified'].isoformat()

@property
def created_utc(self):
Expand Down
152 changes: 88 additions & 64 deletions waterbutler/providers/s3/provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,10 @@
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to remove old boto


import boto3
from botocore.client import Config
from botocore.exceptions import ClientError

from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
Expand Down Expand Up @@ -45,7 +49,7 @@ class S3Provider(provider.BaseProvider):
NAME = 's3'

def __init__(self, auth, credentials, settings):
    """Initialize S3Provider

    .. note::

        Neither `S3Connection#__init__` nor `S3Connection#get_bucket`
        sends a request.

    :param dict auth: user authentication information (passed to the base provider)
    :param dict credentials: must contain ``host``, ``port``, ``access_key``,
        and ``secret_key``
    :param dict settings: must contain ``bucket``
    """
    super().__init__(auth, credentials, settings)

    # boto3 resource is the replacement API; the legacy boto S3Connection
    # below is kept only while the migration is in progress
    # (reviewer note in the PR: "Need to remove old boto").
    self.s3 = boto3.resource(
        's3',
        # NOTE(review): scheme is hard-coded to plain http and the region
        # to us-east-1 — presumably targeting a local/S3-compatible
        # endpoint; confirm before pointing at real AWS.
        endpoint_url='http://{}:{}'.format(
            credentials['host'],
            credentials['port'],
        ),
        aws_access_key_id=credentials['access_key'],
        aws_secret_access_key=credentials['secret_key'],
        config=Config(signature_version='s3v4'),
        region_name='us-east-1',
    )

    self.connection = S3Connection(
        credentials['access_key'],
        credentials['secret_key'],
        calling_format=OrdinaryCallingFormat(),
        host=credentials['host'],
        port=credentials['port'],
        is_secure=False,
    )
    self.bucket_name = settings['bucket']
    # validate=False: defer any network traffic out of the constructor.
    self.bucket = self.connection.get_bucket(settings['bucket'], validate=False)
    # NOTE(review): this used to be settings-driven
    # (`self.settings.get('encrypt_uploads', False)`); forcing False here
    # silently disables server-side encryption — confirm this is intentional.
    self.encrypt_uploads = False
    self.region = None

async def validate_v1_path(self, path, **kwargs):
Expand Down Expand Up @@ -347,27 +371,29 @@ async def revisions(self, path, **kwargs):
:rtype list:
"""
await self._check_region()

query_params = {'prefix': path.path, 'delimiter': '/', 'versions': ''}
url = functools.partial(self.bucket.generate_url, settings.TEMP_URL_SECS, 'GET', query_parameters=query_params)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This needs to be fixed

resp = await self.make_request(
'GET',
url,
params=query_params,
expects=(200, ),
throws=exceptions.MetadataError,
)
content = await resp.read()
versions = xmltodict.parse(content)['ListVersionsResult'].get('Version') or []
try:
resp = await self.make_request(
'GET',
url,
params=query_params,
expects=(200, ),
throws=exceptions.MetadataError,
)
content = await resp.read()
versions = xmltodict.parse(content)['ListVersionsResult'].get('Version') or []

if isinstance(versions, dict):
versions = [versions]
if isinstance(versions, dict):
versions = [versions]

return [
S3Revision(item)
for item in versions
if item['Key'] == path.path
]
return [
S3Revision(item)
for item in versions
if item['Key'] == path.path
]
except:
return []

async def metadata(self, path, revision=None, **kwargs):
"""Get Metadata about the requested file or folder
Expand All @@ -394,9 +420,10 @@ async def create_folder(self, path, folder_precheck=True, **kwargs):
if (await self.exists(path)):
raise exceptions.FolderNamingConflict(path.name)

url = functools.partial(self.bucket.new_key(path.path).generate_url, settings.TEMP_URL_SECS, 'PUT')
async with self.request(
'PUT',
functools.partial(self.bucket.new_key(path.path).generate_url, settings.TEMP_URL_SECS, 'PUT'),
url,
skip_auto_headers={'CONTENT-TYPE'},
expects=(200, 201),
throws=exceptions.CreateFolderError
Expand All @@ -406,63 +433,60 @@ async def create_folder(self, path, folder_precheck=True, **kwargs):
async def _metadata_file(self, path, revision=None):
await self._check_region()

if revision == 'Latest':
revision = None
resp = await self.make_request(
'HEAD',
functools.partial(
self.bucket.new_key(path.path).generate_url,
settings.TEMP_URL_SECS,
'HEAD',
query_parameters={'versionId': revision} if revision else None
),
expects=(200, ),
throws=exceptions.MetadataError,
)
await resp.release()
return S3FileMetadataHeaders(path.path, resp.headers)
if (
revision == 'Latest' or
revision == '' or
not revision
):
obj = self.s3.Object(
self.bucket.name,
path.path
)
else:
obj = self.s3.ObjectVersion(
self.bucket.name,
path.path,
revision
)
try:
obj.load()
except ClientError as err:
if err.response['Error']['Code'] == '404':
raise exceptions.NotFoundError(str(path))
else:
raise err

async def _metadata_folder(self, path):
await self._check_region()
return S3FileMetadataHeaders(path.path, obj)

params = {'prefix': path.path, 'delimiter': '/'}
resp = await self.make_request(
'GET',
functools.partial(self.bucket.generate_url, settings.TEMP_URL_SECS, 'GET', query_parameters=params),
params=params,
expects=(200, ),
throws=exceptions.MetadataError,
async def _metadata_folder(self, path):
"""Get metadata about the contents of a bucket. This is either the
contents at the root of the bucket, or a folder has
been selected as a prefix by the user
"""
bucket = self.s3.Bucket(self.bucket_name)
result = bucket.meta.client.list_objects(
Bucket=bucket.name,
Prefix=path.path,
Delimiter='/'
)

contents = await resp.read()

parsed = xmltodict.parse(contents, strip_whitespace=False)['ListBucketResult']

contents = parsed.get('Contents', [])
prefixes = parsed.get('CommonPrefixes', [])
prefixes = result.get('CommonPrefixes', [])
contents = result.get('Contents', [])

if not contents and not prefixes and not path.is_root:
# If contents and prefixes are empty then this "folder"
# must exist as a key with a / at the end of the name
# if the path is root there is no need to test if it exists
resp = await self.make_request(
'HEAD',
functools.partial(self.bucket.new_key(path.path).generate_url, settings.TEMP_URL_SECS, 'HEAD'),
expects=(200, ),
throws=exceptions.MetadataError,
)
await resp.release()

obj = self.s3.Object(self.bucket_name, path.path)
obj.load()

if isinstance(contents, dict):
contents = [contents]

if isinstance(prefixes, dict):
prefixes = [prefixes]

items = [
S3FolderMetadata(item)
for item in prefixes
]
items = [S3FolderMetadata(item) for item in prefixes]

for content in contents:
if content['Key'] == path.path:
Expand All @@ -486,7 +510,7 @@ async def _check_region(self):

Region Naming: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
"""
if self.region is None:
if self.region is "Chewbacca":
self.region = await self._get_bucket_region()
if self.region == 'EU':
self.region = 'eu-west-1'
Expand Down