Skip to content

Commit

Permalink
Automatically empty buckets in Lambda examples (#4453)
Browse files Browse the repository at this point in the history
  • Loading branch information
rdettai authored Jan 31, 2024
1 parent 809ddd7 commit d1ce847
Show file tree
Hide file tree
Showing 4 changed files with 51 additions and 9 deletions.
7 changes: 5 additions & 2 deletions distribution/lambda/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -67,14 +67,17 @@ deploy-mock-data: package check-env

# address https://github.com/aws/aws-cdk/issues/20060
before-destroy:
mkdir -p cdk.out
touch $(INDEXER_PACKAGE_PATH)
touch $(SEARCHER_PACKAGE_PATH)

destroy-hdfs: before-destroy
cdk destroy -a cdk/app.py HdfsStack
python -c 'from cdk import cli; cli.empty_hdfs_bucket()'
cdk destroy --force -a cdk/app.py HdfsStack

destroy-mock-data: before-destroy
cdk destroy -a cdk/app.py MockDataStack
python -c 'from cdk import cli; cli.empty_mock_data_buckets()'
cdk destroy --force -a cdk/app.py MockDataStack

clean:
rm -rf cdk.out
Expand Down
37 changes: 30 additions & 7 deletions distribution/lambda/cdk/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,13 +269,16 @@ def get_logs(


def download_logs_to_file(request_id: str, function_name: str, invoke_start: float):
    """Fetch the CloudWatch logs of one Lambda invocation and save them locally.

    Logs are written to ``lambda.{request_id}.log`` in the current directory.

    Args:
        request_id: the Lambda request id used to filter the log events.
        function_name: name of the invoked Lambda function.
        invoke_start: invocation start time as a POSIX timestamp in seconds.

    Downloading logs is best-effort: any failure (e.g. logs not yet
    propagated to CloudWatch) is printed instead of raised so it never
    aborts the caller's invocation flow.
    """
    try:
        with open(f"lambda.{request_id}.log", "w") as f:
            for log in get_logs(
                function_name,
                request_id,
                # get_logs expects the start timestamp in milliseconds
                int(invoke_start * 1000),
            ):
                f.write(log)
    except Exception as e:
        print(f"Failed to download logs: {e}")


def invoke_mock_data_searcher():
Expand All @@ -288,11 +291,31 @@ def invoke_mock_data_searcher():


def _clean_s3_bucket(bucket_name: str, prefix: str = ""):
    """Delete every object under ``prefix`` in the given S3 bucket."""
    print(f"Cleaning up bucket {bucket_name}/{prefix}...")
    s3_resource = session.resource("s3")
    target_bucket = s3_resource.Bucket(bucket_name)
    target_bucket.objects.filter(Prefix=prefix).delete()


def empty_hdfs_bucket():
    """Empty the index store bucket created by the HDFS example stack."""
    index_store_bucket = _get_cloudformation_output_value(
        app.HDFS_STACK_NAME, hdfs_stack.INDEX_STORE_BUCKET_NAME_EXPORT_NAME
    )
    _clean_s3_bucket(index_store_bucket)


def empty_mock_data_buckets():
    """Empty both buckets created by the mock data example stack.

    The index store bucket is emptied first, then the source bucket,
    matching the order of the CloudFormation exports.
    """
    for export_name in (
        mock_data_stack.INDEX_STORE_BUCKET_NAME_EXPORT_NAME,
        mock_data_stack.SOURCE_BUCKET_NAME_EXPORT_NAME,
    ):
        _clean_s3_bucket(
            _get_cloudformation_output_value(app.MOCK_DATA_STACK_NAME, export_name)
        )


@cache
def _git_commit():
return subprocess.run(
Expand Down
14 changes: 14 additions & 0 deletions distribution/lambda/cdk/stacks/examples/mock_data_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
from ..services.quickwit_service import QuickwitService

# CloudFormation export name for the searcher Lambda's function name.
SEARCHER_FUNCTION_NAME_EXPORT_NAME = "mock-data-searcher-function-name"
# CloudFormation export names for the stack's bucket names; presumably read
# back by the CLI to locate and empty the buckets before destroy — TODO confirm.
INDEX_STORE_BUCKET_NAME_EXPORT_NAME = "mock-data-index-store-bucket-name"
SOURCE_BUCKET_NAME_EXPORT_NAME = "mock-data-source-bucket-name"


class Source(Construct):
Expand Down Expand Up @@ -66,6 +68,12 @@ def __init__(
mock_data_bucket.add_object_created_notification(
aws_s3_notifications.LambdaDestination(qw_svc.indexer.lambda_function)
)
aws_cdk.CfnOutput(
self,
"source-bucket-name",
value=mock_data_bucket.bucket_name,
export_name=SOURCE_BUCKET_NAME_EXPORT_NAME,
)


class SearchAPI(Construct):
Expand Down Expand Up @@ -164,6 +172,12 @@ def __init__(
api_key=search_api_key,
)

aws_cdk.CfnOutput(
self,
"index-store-bucket-name",
value=qw_svc.bucket.bucket_name,
export_name=INDEX_STORE_BUCKET_NAME_EXPORT_NAME,
)
aws_cdk.CfnOutput(
self,
"searcher-function-name",
Expand Down
2 changes: 2 additions & 0 deletions docs/guides/e2e-serverless-aws-lambda.md
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,9 @@ curl -d '{"query":"quantity:>5", "max_hits": 10}' \
--compressed
```

:::note
The index is not created until the first run of the Indexer, so you might need a few minutes before your first search request succeeds. The API Gateway key configuration also takes a minute or two to propagate, so the first requests might receive an authorization error response.
:::

Because the JSON query responses are often quite verbose, the Searcher Lambda always compresses them before sending them on the wire. It is crucial to keep this size low, both to avoid hitting the Lambda payload size limit of 6MB and to avoid egress costs at around $0.10/GB. We do this regardless of the `accept-encoding` request header, this is why the `--compressed` flag needs to be set to `curl`.

Expand Down

0 comments on commit d1ce847

Please sign in to comment.