add life cycle to s3 express
TingDaoK committed Feb 12, 2025
1 parent f92f250 commit d2b5dbc
Showing 2 changed files with 58 additions and 31 deletions.
15 changes: 7 additions & 8 deletions tests/mock_s3_server/mock_s3_server.py
@@ -90,12 +90,12 @@ def resolve_response(self, wrapper, request_type, chunked=False, head_request=False):

        # If request_headers is present, validate that the request contains all required headers
        if 'request_headers' in data:
-            for header in data['request_headers']:
-                header_bytes = header.encode('utf-8')
-                if not any(header_bytes == h[0] for h in self.request_headers):
-                    response = Response(status_code=500, delay=0, headers=headers,
-                                        data=json.dumps({'error': f"Missing required header: {header}"}), chunked=chunked, head_request=head_request)
-                    return response
+            for header in data['request_headers']:
+                header_bytes = header.encode('utf-8')
+                if not any(header_bytes == h[0] for h in self.request_headers):
+                    response = Response(status_code=500, delay=0, headers=headers,
+                                        data=json.dumps({'error': f"Missing required header: {header}"}), chunked=chunked, head_request=head_request)
+                    return response

        # if response has delay, then sleep before sending it
        delay = data.get('delay', 0)
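
For reference, the validation above checks each name listed under a config's request_headers against the headers recorded from the request, which are (name, value) byte tuples. A minimal standalone sketch of that same membership test, with an invented required header and invented captured headers:

import json

# Invented stand-ins: "required" mimics a config file's "request_headers"
# list; "request_headers" mimics self.request_headers, the (name, value)
# byte tuples captured from the incoming request.
required = ["x-amz-checksum-crc32"]
request_headers = [(b"host", b"localhost"), (b"content-length", b"0")]

for header in required:
    header_bytes = header.encode('utf-8')
    if not any(header_bytes == h[0] for h in request_headers):
        # The server wraps this payload in a 500 Response; here it is printed.
        print(json.dumps({'error': f"Missing required header: {header}"}))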
@@ -411,7 +411,6 @@ def handle_get_object_modified(start_range, end_range, request):
    else:
        # Check the request header to make sure "If-Match" is set
        etag = get_request_header_value(request, "if-match")
-        print(etag)
        # fetch Etag from the first_part response file
        response_file = os.path.join(
            base_dir, S3Opts.GetObject.name, f"get_object_modified_first_part.json")
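
The branch above mocks a conditional ranged read. As a hedged client-side illustration of the If-Match pattern it checks for (bucket and key names are invented, not from this commit):

import boto3

s3 = boto3.client('s3')
# Pin the object generation: grab the ETag, then fetch a later range only
# if the object is unchanged (S3 answers 412 Precondition Failed otherwise).
head = s3.head_object(Bucket="my-bucket", Key="my-key")
part = s3.get_object(Bucket="my-bucket", Key="my-key",
                     Range="bytes=1024-2047", IfMatch=head['ETag'])
print(part['ResponseMetadata']['HTTPStatusCode'])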
@@ -436,7 +435,7 @@ def handle_get_object(wrapper, request, parsed_path, head_request=False):

    if (parsed_path.path == "/get_object_invalid_response_missing_content_range" or
            parsed_path.path == "/get_object_invalid_response_missing_etags" or
-            parsed_path.path == "/get_object_long_error"):
+            parsed_path.path == "/get_object_long_error"):
        # Don't generate the body for those requests
        return response_config
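
A hedged sketch of driving one of these special paths by hand; the mock server's host and port are assumptions, since they are not shown in this diff:

import http.client

# Assumed endpoint; point this at wherever the mock server is listening.
conn = http.client.HTTPConnection("localhost", 8080)
conn.request("GET", "/get_object_long_error")
resp = conn.getresponse()
print(resp.status, resp.read(200))
conn.close()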

74 changes: 51 additions & 23 deletions tests/test_helper/test_helper.py
@@ -3,6 +3,7 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
+import json
import boto3
import botocore
import sys
@@ -134,31 +135,58 @@ def create_bucket_with_lifecycle(availability_zone=None, client=s3_client):
        create_bucket(client,
                      Bucket=bucket_name,
                      CreateBucketConfiguration=bucket_config)
        print(f"s3://{bucket_name} - Configuring bucket...")
-        if availability_zone is None:
-            client.put_bucket_lifecycle_configuration(
-                Bucket=bucket_name,
-                LifecycleConfiguration={
-                    'Rules': [
-                        {
-                            'ID': 'clean up non-pre-existing objects',
-                            'Expiration': {
-                                'Days': 1,
-                            },
-                            'Filter': {
-                                'Prefix': 'upload/',
-                            },
-                            'Status': 'Enabled',
-                            'NoncurrentVersionExpiration': {
-                                'NoncurrentDays': 1,
-                            },
-                            'AbortIncompleteMultipartUpload': {
-                                'DaysAfterInitiation': 1,
-                            },
-                        },
-                    ],
-                },
-            )
+        if availability_zone is not None:
+            # https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-lifecycle.html#directory-bucket-lifecycle-differences
+            # S3 express requires a bucket policy to allow session-based access to perform lifecycle actions
+            account_id = boto3.client(
+                'sts').get_caller_identity().get('Account')
+            bucket_policy = {
+                "Version": "2008-10-17",
+                "Statement": [
+                    {
+                        "Effect": "Allow",
+                        "Principal": {
+                            "Service": "lifecycle.s3.amazonaws.com"
+                        },
+                        "Action": "s3express:CreateSession",
+                        "Condition": {
+                            "StringEquals": {
+                                "s3express:SessionMode": "ReadWrite"
+                            }
+                        },
+                        "Resource": [
+                            f"arn:aws:s3express:{REGION}:{account_id}:bucket/{bucket_name}"
+                        ]
+                    }
+                ]
+            }
+            client.put_bucket_policy(
+                Bucket=bucket_name, Policy=json.dumps(bucket_policy))
+
+        client.put_bucket_lifecycle_configuration(
+            Bucket=bucket_name,
+            LifecycleConfiguration={
+                'Rules': [
+                    {
+                        'ID': 'clean up non-pre-existing objects',
+                        'Expiration': {
+                            'Days': 1,
+                        },
+                        'Filter': {
+                            'Prefix': 'upload/',
+                        },
+                        'Status': 'Enabled',
+                        'NoncurrentVersionExpiration': {
+                            'NoncurrentDays': 1,
+                        },
+                        'AbortIncompleteMultipartUpload': {
+                            'DaysAfterInitiation': 1,
+                        },
+                    },
+                ],
+            },
+        )

        put_pre_existing_objects(
            10*MB, 'pre-existing-10MB', bucket=bucket_name, client=client)
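
A hedged usage sketch of the updated helper path: create the directory bucket for one Availability Zone, then read the lifecycle configuration back. The bucket name and AZ id below are invented; get_bucket_lifecycle_configuration is the standard boto3 read-back call:

import boto3

# Invented example values; the helper derives the real directory-bucket
# name from its bucket name base and the AZ id.
bucket = "crt-test-bucket--use1-az4--x-s3"
client = boto3.client('s3', region_name="us-east-1")

rules = client.get_bucket_lifecycle_configuration(Bucket=bucket)['Rules']
assert any(r['ID'] == 'clean up non-pre-existing objects' for r in rules)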
