diff --git a/distribution/lambda/Makefile b/distribution/lambda/Makefile
index 39cd723271f..fc4445a034a 100644
--- a/distribution/lambda/Makefile
+++ b/distribution/lambda/Makefile
@@ -67,14 +67,17 @@ deploy-mock-data: package check-env
 
 # address https://github.com/aws/aws-cdk/issues/20060
 before-destroy:
+	mkdir -p cdk.out
 	touch $(INDEXER_PACKAGE_PATH)
 	touch $(SEARCHER_PACKAGE_PATH)
 
 destroy-hdfs: before-destroy
-	cdk destroy -a cdk/app.py HdfsStack
+	python -c 'from cdk import cli; cli.empty_hdfs_bucket()'
+	cdk destroy --force -a cdk/app.py HdfsStack
 
 destroy-mock-data: before-destroy
-	cdk destroy -a cdk/app.py MockDataStack
+	python -c 'from cdk import cli; cli.empty_mock_data_buckets()'
+	cdk destroy --force -a cdk/app.py MockDataStack
 
 clean:
 	rm -rf cdk.out
diff --git a/distribution/lambda/cdk/cli.py b/distribution/lambda/cdk/cli.py
index ce2fe4cf75d..bfffc5f846a 100644
--- a/distribution/lambda/cdk/cli.py
+++ b/distribution/lambda/cdk/cli.py
@@ -269,13 +269,16 @@ def get_logs(
 
 
 def download_logs_to_file(request_id: str, function_name: str, invoke_start: float):
-    with open(f"lambda.{request_id}.log", "w") as f:
-        for log in get_logs(
-            function_name,
-            request_id,
-            int(invoke_start * 1000),
-        ):
-            f.write(log)
+    try:
+        with open(f"lambda.{request_id}.log", "w") as f:
+            for log in get_logs(
+                function_name,
+                request_id,
+                int(invoke_start * 1000),
+            ):
+                f.write(log)
+    except Exception as e:
+        print(f"Failed to download logs: {e}")
 
 
 def invoke_mock_data_searcher():
@@ -288,11 +291,31 @@ def invoke_mock_data_searcher():
 
 
 def _clean_s3_bucket(bucket_name: str, prefix: str = ""):
+    print(f"Cleaning up bucket {bucket_name}/{prefix}...")
     s3 = session.resource("s3")
     bucket = s3.Bucket(bucket_name)
     bucket.objects.filter(Prefix=prefix).delete()
 
 
+def empty_hdfs_bucket():
+    bucket_name = _get_cloudformation_output_value(
+        app.HDFS_STACK_NAME, hdfs_stack.INDEX_STORE_BUCKET_NAME_EXPORT_NAME
+    )
+
+    _clean_s3_bucket(bucket_name)
+
+
+def empty_mock_data_buckets():
+    bucket_name = _get_cloudformation_output_value(
+        app.MOCK_DATA_STACK_NAME, mock_data_stack.INDEX_STORE_BUCKET_NAME_EXPORT_NAME
+    )
+    _clean_s3_bucket(bucket_name)
+    bucket_name = _get_cloudformation_output_value(
+        app.MOCK_DATA_STACK_NAME, mock_data_stack.SOURCE_BUCKET_NAME_EXPORT_NAME
+    )
+    _clean_s3_bucket(bucket_name)
+
+
 @cache
 def _git_commit():
     return subprocess.run(
diff --git a/distribution/lambda/cdk/stacks/examples/mock_data_stack.py b/distribution/lambda/cdk/stacks/examples/mock_data_stack.py
index a54018a6d8d..79aa1eb20ef 100644
--- a/distribution/lambda/cdk/stacks/examples/mock_data_stack.py
+++ b/distribution/lambda/cdk/stacks/examples/mock_data_stack.py
@@ -15,6 +15,8 @@
 from ..services.quickwit_service import QuickwitService
 
 SEARCHER_FUNCTION_NAME_EXPORT_NAME = "mock-data-searcher-function-name"
+INDEX_STORE_BUCKET_NAME_EXPORT_NAME = "mock-data-index-store-bucket-name"
+SOURCE_BUCKET_NAME_EXPORT_NAME = "mock-data-source-bucket-name"
 
 
 class Source(Construct):
@@ -66,6 +68,12 @@ def __init__(
         mock_data_bucket.add_object_created_notification(
             aws_s3_notifications.LambdaDestination(qw_svc.indexer.lambda_function)
         )
+        aws_cdk.CfnOutput(
+            self,
+            "source-bucket-name",
+            value=mock_data_bucket.bucket_name,
+            export_name=SOURCE_BUCKET_NAME_EXPORT_NAME,
+        )
 
 
 class SearchAPI(Construct):
@@ -164,6 +172,12 @@ def __init__(
             api_key=search_api_key,
         )
 
+        aws_cdk.CfnOutput(
+            self,
+            "index-store-bucket-name",
+            value=qw_svc.bucket.bucket_name,
+            export_name=INDEX_STORE_BUCKET_NAME_EXPORT_NAME,
+        )
         aws_cdk.CfnOutput(
             self,
             "searcher-function-name",
diff --git a/docs/guides/e2e-serverless-aws-lambda.md b/docs/guides/e2e-serverless-aws-lambda.md
index 0b97481b979..0a46caec7c2 100644
--- a/docs/guides/e2e-serverless-aws-lambda.md
+++ b/docs/guides/e2e-serverless-aws-lambda.md
@@ -143,7 +143,9 @@ curl -d '{"query":"quantity:>5", "max_hits": 10}' \
   --compressed
 ```
 
+:::note
 The index is not created until the first run of the Indexer, so you might need a few minutes before your first search request succeeds. The API Gateway key configuration also takes a minute or two to propagate, so the first requests might receive an authorization error response.
+:::
 
 Because the JSON query responses are often quite verbose, the Searcher Lambda always compresses them before sending them on the wire. It is crucial to keep this size low, both to avoid hitting the Lambda payload size limit of 6MB and to avoid egress costs at around $0.10/GB. We do this regardless of the `accept-encoding` request header, this is why the `--compressed` flag needs to be set to `curl`.
 