Skip to content

Commit

Permalink
Merge branch 'main' of https://github.com/HHS/simpler-grants-gov into…
Browse files Browse the repository at this point in the history
… 2673/users-token-sub-endpoint
  • Loading branch information
babebe committed Nov 21, 2024
2 parents aceb260 + 873c776 commit 3f5934f
Show file tree
Hide file tree
Showing 19 changed files with 349 additions and 40 deletions.
32 changes: 32 additions & 0 deletions .github/workflows/cd-metabase.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
name: Deploy Metabase
run-name: Deploy ${{ github.ref_name }} to Metabase ${{ inputs.environment || (github.event_name == 'release' && 'prod') || 'nonprod' }}

on:
  workflow_dispatch:
    inputs:
      environment:
        description: "target environment"
        required: true
        default: "dev"
        type: choice
        options:
          - dev
          - staging
          - prod
      image-tag:
        description: "Metabase enterprise image tag to deploy"
        required: true
        type: string

jobs:
  deploy:
    name: Deploy
    uses: ./.github/workflows/deploy-metabase.yml
    strategy:
      max-parallel: 1
      fail-fast: false
      matrix:
        # Release events deploy to prod. A manual dispatch deploys to the environment
        # the operator selected (the original expression ignored the required
        # `environment` input entirely). Remaining fallbacks cover other triggers:
        # main -> dev + staging, anything else -> dev.
        envs: ${{ github.event_name == 'release' && fromJSON('["prod"]') || (inputs.environment && fromJSON(format('["{0}"]', inputs.environment))) || (github.ref_name == 'main' && fromJSON('["dev", "staging"]')) || fromJSON('["dev"]') }}
    with:
      version: ${{ inputs.image-tag }}
      environment: ${{ matrix.envs }}
39 changes: 39 additions & 0 deletions .github/workflows/deploy-metabase.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
name: Chained Deploy
run-name: Chained Deploy Layer for ${{ github.ref_name }} to Metabase ${{ inputs.environment || (github.event_name == 'release' && 'prod') || 'nonprod' }}

on:
  workflow_call:
    inputs:
      environment:
        description: "the name of the application environment (e.g. dev, staging, prod)"
        required: true
        type: string
      version:
        # This value is forwarded to `make metabase-deploy` as IMAGE_TAG, so it must be
        # a Metabase image tag (the previous description claimed a git reference, which
        # contradicts both the caller's `image-tag` input and the usage below).
        description: "Metabase image tag to deploy (passed to the deploy target as IMAGE_TAG)"
        required: true
        type: string

# Serialize deploys per environment so two runs can't fight over the same stack.
concurrency: cd-${{ inputs.environment }}

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write  # needed for OIDC-based AWS credential exchange
    steps:
      - uses: actions/checkout@v4
      - uses: hashicorp/setup-terraform@v3
        with:
          terraform_version: 1.9.7
          terraform_wrapper: false

      - name: Configure AWS credentials
        uses: ./.github/actions/configure-aws-credentials
        with:
          app_name: analytics
          environment: ${{ inputs.environment }}

      - name: Deploy metabase
        run: make metabase-deploy APP_NAME=metabase ENVIRONMENT=${{ inputs.environment }} IMAGE_TAG=${{ inputs.version }}
12 changes: 12 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,13 @@ infra-update-app-service: ## Create or update $APP_NAME's web service module
terraform -chdir="infra/$(APP_NAME)/service" init -input=false -reconfigure -backend-config="$(ENVIRONMENT).s3.tfbackend"
terraform -chdir="infra/$(APP_NAME)/service" apply -var="environment_name=$(ENVIRONMENT)"

infra-update-metabase-service: ## Create or update the analytics Metabase service module
	# The metabase module lives at a fixed path (infra/analytics/metabase), so unlike
	# the generic infra-update-app-service target this one does not use APP_NAME;
	# the previous APP_NAME check was dead and its help text was misleading.
	@:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging")
	terraform -chdir="infra/analytics/metabase" init -input=false -reconfigure -backend-config="$(ENVIRONMENT).s3.tfbackend"
	terraform -chdir="infra/analytics/metabase" apply -var="environment_name=$(ENVIRONMENT)"

# The prerequisite for this rule is obtained by
# prefixing each module with the string "infra-validate-module-"
infra-validate-modules: $(patsubst %, infra-validate-module-%, $(MODULES)) ## Run terraform validate on reusable child modules
Expand Down Expand Up @@ -196,6 +203,11 @@ release-deploy: ## Deploy release to $APP_NAME's web service in $ENVIRONMENT
@:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "dev")
./bin/deploy-release.sh $(APP_NAME) $(IMAGE_TAG) $(ENVIRONMENT)

metabase-deploy: ## Deploy the Metabase image tagged $IMAGE_TAG to $APP_NAME's service in $ENVIRONMENT
	@:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code)
	@:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "dev")
	# IMAGE_TAG is passed to the script below, so validate it like the other vars
	# instead of letting the deploy fail (or deploy an empty tag) later on.
	@:$(call check_defined, IMAGE_TAG, the tag of the Metabase image to deploy)
	./bin/deploy-metabase.sh $(APP_NAME) $(IMAGE_TAG) $(ENVIRONMENT)

release-image-name: ## Prints the image name of the release image
@:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code)
@echo $(IMAGE_NAME)
Expand Down
2 changes: 2 additions & 0 deletions OPERATIONS.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ Scaling is handled by configuring the following values:
- instance desired instance count
- instance scaling minimum capacity
- instance scaling maximum capacity
- instance CPU
- instance memory

Our ECS instances auto scale based on both memory and CPU. You can view the autoscaling configuration
here: [infra/modules/service/autoscaling.tf](infra/modules/service/autoscaling.tf)
Expand Down
27 changes: 13 additions & 14 deletions api/openapi.generated.yml
Original file line number Diff line number Diff line change
Expand Up @@ -1268,8 +1268,19 @@ components:
properties:
download_path:
type: string
description: The URL to download the attachment
example: https://...
description: The file's download path
file_size_bytes:
type: integer
description: The size of the file in bytes
example: 1024
created_at:
type: string
format: date-time
readOnly: true
updated_at:
type: string
format: date-time
readOnly: true
mime_type:
type: string
description: The MIME type of the attachment
Expand All @@ -1282,10 +1293,6 @@ components:
type: string
description: A description of the attachment
example: The full announcement NOFO
file_size_bytes:
type: integer
description: The size of the attachment in bytes
example: 10012
opportunity_attachment_type:
description: The type of attachment
example: !!python/object/apply:src.constants.lookup_constants.OpportunityAttachmentType
Expand All @@ -1295,14 +1302,6 @@ components:
- other
type:
- string
created_at:
type: string
format: date-time
readOnly: true
updated_at:
type: string
format: date-time
readOnly: true
OpportunityWithAttachmentsV1:
type: object
properties:
Expand Down
56 changes: 56 additions & 0 deletions api/src/api/extracts_v1/extract_schema.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
from src.api.schemas.extension import Schema, fields
from src.api.schemas.response_schema import AbstractResponseSchema, FileResponseSchema
from src.constants.lookup_constants import ExtractType
from src.pagination.pagination_schema import generate_pagination_schema


class ExtractMetadataFilterV1Schema(Schema):
    """Optional filters for narrowing an extract-metadata search.

    Every field accepts an explicit null, in which case that filter is skipped.
    """

    extract_type = fields.Enum(
        ExtractType,
        allow_none=True,
        metadata={"description": "The type of extract to filter by", "example": "opportunities_csv"},
    )
    start_date = fields.Date(
        allow_none=True,
        metadata={"description": "The start date for filtering extracts", "example": "2023-10-01"},
    )
    end_date = fields.Date(
        allow_none=True,
        metadata={"description": "The end date for filtering extracts", "example": "2023-10-07"},
    )


class ExtractMetadataRequestSchema(AbstractResponseSchema):
    """Request body for listing extract metadata: optional filters plus required pagination."""

    # NOTE(review): a *request* schema inheriting AbstractResponseSchema is unusual —
    # verify the response-envelope fields it adds are actually wanted on a request payload.
    filters = fields.Nested(ExtractMetadataFilterV1Schema())
    # Pagination is mandatory; only created_at is a valid sort column.
    pagination = fields.Nested(
        generate_pagination_schema("ExtractMetadataPaginationV1Schema", ["created_at"]),
        required=True,
    )


class ExtractMetadataResponseSchema(FileResponseSchema):
    """A single extract-metadata record, plus the shared file-download fields
    (download_path, file_size_bytes, timestamps) inherited from FileResponseSchema."""

    extract_metadata_id = fields.Integer(
        metadata={"description": "The ID of the extract metadata", "example": 1},
    )
    extract_type = fields.String(
        metadata={"description": "The type of extract", "example": "opportunity_data_extract"},
    )


class ExtractMetadataListResponseSchema(AbstractResponseSchema):
    """Response envelope whose ``data`` field holds a list of extract-metadata records."""

    data = fields.List(
        fields.Nested(ExtractMetadataResponseSchema),
        metadata={"description": "A list of extract metadata records"},
    )
19 changes: 6 additions & 13 deletions api/src/api/opportunities_v1/opportunity_schemas.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
from enum import StrEnum

from src.api.schemas.extension import Schema, fields, validators
from src.api.schemas.response_schema import AbstractResponseSchema, PaginationMixinSchema
from src.api.schemas.response_schema import (
AbstractResponseSchema,
FileResponseSchema,
PaginationMixinSchema,
)
from src.api.schemas.search_schema import (
BoolSearchSchemaBuilder,
DateSearchSchemaBuilder,
Expand Down Expand Up @@ -309,13 +313,7 @@ class OpportunityV1Schema(Schema):
updated_at = fields.DateTime(dump_only=True)


class OpportunityAttachmentV1Schema(Schema):
download_path = fields.String(
metadata={
"description": "The URL to download the attachment",
"example": "https://...",
}
)
class OpportunityAttachmentV1Schema(FileResponseSchema):
mime_type = fields.String(
metadata={"description": "The MIME type of the attachment", "example": "application/pdf"}
)
Expand All @@ -328,18 +326,13 @@ class OpportunityAttachmentV1Schema(Schema):
"example": "The full announcement NOFO",
}
)
file_size_bytes = fields.Integer(
metadata={"description": "The size of the attachment in bytes", "example": 10012}
)
opportunity_attachment_type = fields.Enum(
OpportunityAttachmentType,
metadata={
"description": "The type of attachment",
"example": OpportunityAttachmentType.NOTICE_OF_FUNDING_OPPORTUNITY,
},
)
created_at = fields.DateTime(dump_only=True)
updated_at = fields.DateTime(dump_only=True)


class OpportunityWithAttachmentsV1Schema(OpportunityV1Schema):
Expand Down
13 changes: 13 additions & 0 deletions api/src/api/schemas/response_schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,3 +58,16 @@ class ErrorResponseSchema(Schema):
"example": "550e8400-e29b-41d4-a716-446655440000",
}
)


class FileResponseSchema(Schema):
    """Shared response fields for any endpoint that returns a downloadable file."""

    # Path the client uses to fetch the file.
    download_path = fields.String(
        metadata={"description": "The file's download path"},
    )
    # Size of the file, in bytes.
    file_size_bytes = fields.Integer(
        metadata={"description": "The size of the file in bytes", "example": 1024}
    )
    # Record timestamps; serialized on output only.
    created_at = fields.DateTime(dump_only=True)
    updated_at = fields.DateTime(dump_only=True)
110 changes: 110 additions & 0 deletions api/tests/src/api/extracts_v1/test_extract_schema.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
from datetime import date

import pytest
from marshmallow import ValidationError

from src.api.extracts_v1.extract_schema import (
ExtractMetadataListResponseSchema,
ExtractMetadataRequestSchema,
ExtractMetadataResponseSchema,
)
from src.db.models.extract_models import ExtractMetadata


@pytest.fixture
def sample_extract_metadata():
    """A single ExtractMetadata row with representative CSV-extract values."""
    record = ExtractMetadata(
        extract_metadata_id=1,
        extract_type="opportunities_csv",
        file_name="test_extract.csv",
        file_path="/test/path/test_extract.csv",
        file_size_bytes=2048,
    )
    return record


def test_request_schema_validation():
    """Valid payloads load with typed values; an invalid extract_type is rejected."""
    schema = ExtractMetadataRequestSchema()

    valid_data = {
        "filters": {
            "extract_type": "opportunities_csv",
            "start_date": "2023-10-01",
            "end_date": "2023-10-07",
        },
        "pagination": {
            "order_by": "created_at",
            "page_offset": 1,
            "page_size": 25,
            "sort_direction": "ascending",
        },
    }
    result = schema.load(valid_data)
    assert result["filters"]["extract_type"] == "opportunities_csv"
    assert result["filters"]["start_date"] == date(2023, 10, 1)
    assert result["filters"]["end_date"] == date(2023, 10, 7)

    # Keep the rest of the payload valid so the only possible failure is the bad
    # extract_type. (The original passed a flat, top-level dict, which raised
    # ValidationError for the unrelated reason that required "pagination" was missing.)
    invalid_data = {
        "filters": {"extract_type": "invalid_type", "start_date": "2023-10-01"},
        "pagination": valid_data["pagination"],
    }
    with pytest.raises(ValidationError):
        schema.load(invalid_data)


def test_response_schema_single(sample_extract_metadata):
    """Dumping a single record serializes its id, type, download path, and size."""
    schema = ExtractMetadataResponseSchema()

    sample_extract_metadata.download_path = "http://www.example.com"
    extract_metadata = schema.dump(sample_extract_metadata)

    # Each field asserted exactly once (the original checked download_path three times).
    assert extract_metadata["extract_metadata_id"] == 1
    assert extract_metadata["extract_type"] == "opportunities_csv"
    assert extract_metadata["download_path"] == "http://www.example.com"
    assert extract_metadata["file_size_bytes"] == 2048


def test_response_schema_list(sample_extract_metadata):
    """Dumping a list payload preserves record order and per-record fields."""
    schema = ExtractMetadataListResponseSchema()

    second_record = ExtractMetadata(
        extract_metadata_id=2,
        extract_type="opportunities_xml",
        file_name="test_extract2.xml",
        file_path="/test/path/test_extract2.xml",
        file_size_bytes=1024,
    )
    result = schema.dump({"data": [sample_extract_metadata, second_record]})

    dumped = result["data"]
    assert len(dumped) == 2
    assert dumped[0]["extract_metadata_id"] == 1
    assert dumped[0]["extract_type"] == "opportunities_csv"
    assert dumped[1]["extract_metadata_id"] == 2
    assert dumped[1]["extract_type"] == "opportunities_xml"


def test_request_schema_null_values():
    """Nullable filter fields accept explicit None without failing validation."""
    schema = ExtractMetadataRequestSchema()

    payload = {
        "filters": {"extract_type": None, "start_date": "2023-10-01", "end_date": None},
        "pagination": {
            "order_by": "created_at",
            "page_offset": 1,
            "page_size": 25,
            "sort_direction": "ascending",
        },
    }

    loaded = schema.load(payload)
    filters = loaded["filters"]
    assert filters["extract_type"] is None
    assert filters["start_date"] == date(2023, 10, 1)
    assert filters["end_date"] is None
Loading

0 comments on commit 3f5934f

Please sign in to comment.