diff --git a/.gitignore b/.gitignore
index ee45e62..9e6ca9c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,6 @@
 node_modules/
 cdk.context.json
 cdk.out/
+
+
+.DS_Store
diff --git a/README.md b/README.md
index 98cbc98..48e7bce 100644
--- a/README.md
+++ b/README.md
@@ -50,7 +50,16 @@ test invocations.
 {
   "sourceFilesCsvBucket": "bucket-with-csv",
   "sourceFilesCsvKey": "key-of-source-files.csv",
-  "destinationBucket": "a-target-bucket-in-same-region",
+  "destinationBucket": "a-target-bucket-in-same-region-but-not-same-account",
   "maxItemsPerBatch": 10
 }
 ```
+
+The copy will fan out wide (to a sensible width of roughly 100) - but there is a small cost to
+the startup/shutdown of each Fargate task. The maxItemsPerBatch setting controls how many
+individual files are attempted per Fargate task - noting that we request SPOT tasks.
+
+So there is a balance between the likelihood of SPOT interruption and the re-use of Fargate
+tasks. If tasks are SPOT interrupted, the next invocation will skip already transferred files
+(assuming at least one was copied) - so it is probably safest and cheapest to leave the items
+per batch at 10 and be prepared to re-execute the copy if needed.
diff --git a/dev/EXAMPLE-COPY-README.md b/dev/EXAMPLE-COPY-README.md
new file mode 100644
index 0000000..83db531
--- /dev/null
+++ b/dev/EXAMPLE-COPY-README.md
@@ -0,0 +1,18 @@
+How to do a full-scale invocation test.
+
+Go to the "elsa-data-tmp" bucket in dev.
+It will probably be empty as objects auto-expire.
+Make a folder "copy-out-test-working".
+Copy "example-copy-manifest.csv" into that folder.
+
+THE FOLDER MUST BE NAMED EXACTLY AS SPECIFIED, AS THAT PERMISSION IS BAKED INTO
+THE DEV DEPLOYMENT (IN ORDER TO TEST PERMISSIONS!)
+
+Invoke the dev Steps state machine with the input
+
+{
+  "sourceFilesCsvBucket": "elsa-data-tmp",
+  "sourceFilesCsvKey": "copy-out-test-working/example-copy-manifest.csv",
+  "destinationBucket": "elsa-data-copy-target-sydney",
+  "maxItemsPerBatch": 2
+}
diff --git a/dev/dev.ts b/dev/dev.ts
index f3ab8b5..af8122e 100644
--- a/dev/dev.ts
+++ b/dev/dev.ts
@@ -1,5 +1,5 @@
 import { CopyOutStateMachineConstruct } from "aws-copy-out-sharer";
-import { SubnetType } from "aws-cdk-lib/aws-ec2";
+import { SubnetType, Vpc } from "aws-cdk-lib/aws-ec2";
 import { App, Stack, StackProps } from "aws-cdk-lib";
 import { InfrastructureClient } from "@elsa-data/aws-infrastructure";
 import { Service } from "aws-cdk-lib/aws-servicediscovery";
@@ -12,6 +12,7 @@ const description =
   "Bulk copy-out service for Elsa Data - an application for controlled genomic data sharing";
 
 const devId = "ElsaDataDevCopyOutStack";
+const agId = "ElsaDataAgCopyOutStack";
 
 /**
  * Wraps the copy out construct for development purposes. We don't do this Stack definition in the
@@ -70,3 +71,40 @@ new ElsaDataCopyOutStack(app, devId, {
   },
   description: description,
 });
+
+/**
+ * Wraps an even simpler deployment used directly for AG. We sometimes need to
+ * do AG copies outside of Elsa, and this is also a good test of the copy-out
+ * mechanics - so this stack lets us deploy/destroy it directly.
+ */
+class ElsaDataSimpleCopyOutStack extends Stack {
+  constructor(scope?: Construct, id?: string, props?: StackProps) {
+    super(scope, id, props);
+
+    const vpc = Vpc.fromLookup(this, "Vpc", { vpcName: "main-vpc" });
+
+    const copyOut = new CopyOutStateMachineConstruct(this, "CopyOut", {
+      vpc: vpc,
+      vpcSubnetSelection: SubnetType.PRIVATE_WITH_EGRESS,
+      workingBucket: "elsa-data-copy-working",
+      workingBucketPrefixKey: "temp/",
+      aggressiveTimes: false,
+      allowWriteToInstalledAccount: true,
+    });
+
+    //stateMachineArn: copyOut.stateMachine.stateMachineArn,
+  }
+}
+
+new ElsaDataSimpleCopyOutStack(app, agId, {
+  // the stack can only be deployed to 'dev'
+  env: {
+    account: "602836945884",
+    region: "ap-southeast-2",
+  },
+  tags: {
+    "umccr-org:Product": "ElsaData",
+    "umccr-org:Stack": agId,
+  },
+  description: description,
+});
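A note on the commented-out stateMachineArn line above: if the intent is to surface the new stack's state machine ARN, one option is a CfnOutput. This is a minimal sketch only - it is not part of this diff, though `copyOut.stateMachine` is the property the commented line already references:

```ts
import { CfnOutput } from "aws-cdk-lib";

// inside the ElsaDataSimpleCopyOutStack constructor, after copyOut is created:
// exports the Steps state machine ARN so it is easy to find for manual invokes
new CfnOutput(this, "CopyOutStateMachineArn", {
  value: copyOut.stateMachine.stateMachineArn,
});
```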
diff --git a/dev/example-copy-manifest.csv b/dev/example-copy-manifest.csv
new file mode 100644
index 0000000..05ebf63
--- /dev/null
+++ b/dev/example-copy-manifest.csv
@@ -0,0 +1,12 @@
+umccr-10f-data-dev,"ASHKENAZIM/HG002-HG003-HG004.joint.filter.bcf"
+umccr-10f-data-dev,"ASHKENAZIM/HG002-HG003-HG004.joint.filter.bcf.csi"
+umccr-10f-data-dev,"ASHKENAZIM/HG002-HG003-HG004.joint.filter.vcf"
+umccr-10f-data-dev,"ASHKENAZIM/HG002-HG003-HG004.joint.filter.vcf.gz"
+umccr-10f-data-dev,"ASHKENAZIM/HG002-HG003-HG004.joint.filter.vcf.gz.csi"
+umccr-10f-data-dev,"ASHKENAZIM/HG002-HG003-HG004.joint.filter.vcf.gz.tbi"
+umccr-10f-data-dev,ASHKENAZIM/HG002.bam
+umccr-10f-data-dev,ASHKENAZIM/HG002.bam.bai
+umccr-10f-data-dev,ASHKENAZIM/HG003.bam
+umccr-10f-data-dev,ASHKENAZIM/HG003.bam.bai
+umccr-10f-data-dev,ASHKENAZIM/HG004.bam
+umccr-10f-data-dev,ASHKENAZIM/HG004.bam.bai
diff --git a/dev/package.json b/dev/package.json
index e5bc4f8..b2ddf9c 100644
--- a/dev/package.json
+++ b/dev/package.json
@@ -4,8 +4,10 @@
   "version": "0.0.0",
   "description": "Manual CDK deployment for development",
   "scripts": {
-    "deploy": "pnpm -w run build && cdk deploy",
-    "destroy": "pnpm -w run build && cdk destroy",
+    "deploy": "pnpm -w run build && cdk deploy ElsaDataDevCopyOutStack",
+    "destroy": "pnpm -w run build && cdk destroy ElsaDataDevCopyOutStack",
+    "agdeploy": "pnpm -w run build && cdk deploy ElsaDataAgCopyOutStack",
+    "agdestroy": "pnpm -w run build && cdk destroy ElsaDataAgCopyOutStack",
     "test": "ts-node --prefer-ts-exts test.ts",
     "test-quick": "ts-node --prefer-ts-exts test.ts"
   },
diff --git a/packages/aws-copy-out-sharer/lambda/summarise-copy-lambda/summarise-copy-lambda.ts b/packages/aws-copy-out-sharer/lambda/summarise-copy-lambda/summarise-copy-lambda.ts
index a44ba9d..444cd31 100644
--- a/packages/aws-copy-out-sharer/lambda/summarise-copy-lambda/summarise-copy-lambda.ts
+++ b/packages/aws-copy-out-sharer/lambda/summarise-copy-lambda/summarise-copy-lambda.ts
@@ -31,12 +31,14 @@
   // the manifest.json is generated by an AWS Steps DISTRIBUTED map and shows the results
   // of all the individual map run parts
   const getManifestCommand = new GetObjectCommand({
-    Bucket: event.destinationBucket,
+    Bucket: (event as any).sourceFilesCsvBucket,
     Key: event.rcloneResults.manifestAbsoluteKey,
   });
 
   const getManifestResult = await client.send(getManifestCommand);
 
+  const getManifestContent = await getManifestResult.Body.transformToString();
+
   // A sample manifest
   // {"DestinationBucket":"elsa-data-tmp",
   //  "MapRunArn":"arn:aws:states:ap-southeast-2:12345678:mapRun:CopyOutStateMachineABCD/4474d22f-4056-30e3-978c-027016edac90:0c17ffd6-e8ad-44c0-a65b-a8b721007241",
@@ -48,6 +50,8 @@
 
   const manifest = JSON.parse(getManifestContent);
 
+  console.debug(JSON.stringify(manifest, null, 2));
+
   const rf = manifest["ResultFiles"];
 
   if (!rf)
@@ -80,7 +84,7 @@
 
   for (const s of succeeded) {
     const getSuccessCommand = new GetObjectCommand({
-      Bucket: event.destinationBucket,
+      Bucket: (event as any).sourceFilesCsvBucket,
       Key: s["Key"],
     });
 
@@ -123,8 +127,8 @@
   // looking
   const errors: number = rcloneRow["errors"];
   const lastError: number = rcloneRow["lastError"];
-  const copiedBytes: number = rcloneRow["serverSideCopyBytes"];
-  const copySeconds = rcloneRow["elapsedTime"];
+  const serverSideCopyBytes: number = rcloneRow["serverSideCopyBytes"];
+  const elapsedTime = rcloneRow["elapsedTime"];
   const totalTransfers = rcloneRow["totalTransfers"];
   const retryError = rcloneRow["retryError"];
 
@@ -160,11 +164,13 @@
       };
     } else {
-      // if we did do a copy then copySeconds will normally be a value and we can compute a speed
-      if (copySeconds)
+      // if we did do a copy then elapsedTime will normally be a value and we can compute a speed
+      if (elapsedTime)
         fileResults[b] = {
           name: b,
           status: "COPIED",
-          speed: Math.floor(copiedBytes / copySeconds / 1024 / 1024),
+          speed: Math.floor(
+            serverSideCopyBytes / elapsedTime / 1024 / 1024,
+          ),
           message: "",
         };
     }
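The renamed variables make the speed formula easy to check in isolation. A minimal sketch of the calculation, using a hypothetical stats-row type (`RcloneStatsRow` and `copySpeedMiBs` are names invented here; the real rows come from the rclone wrapper's JSON output and carry more fields than shown):

```ts
// hypothetical shape for a single rclone stats row - only the fields the
// lambda reads above
interface RcloneStatsRow {
  errors: number;
  serverSideCopyBytes: number;
  elapsedTime: number; // seconds
}

// speed in MiB/s, exactly as computed in the lambda: bytes / seconds / 1024 / 1024
function copySpeedMiBs(row: RcloneStatsRow): number {
  return Math.floor(row.serverSideCopyBytes / row.elapsedTime / 1024 / 1024);
}

// e.g. 1 GiB server-side copied over 8 seconds => 128 MiB/s
console.assert(
  copySpeedMiBs({ errors: 0, serverSideCopyBytes: 1073741824, elapsedTime: 8 }) === 128,
);
```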
"MapRunArn":"arn:aws:states:ap-southeast-2:12345678:mapRun:CopyOutStateMachineABCD/4474d22f-4056-30e3-978c-027016edac90:0c17ffd6-e8ad-44c0-a65b-a8b721007241", @@ -48,6 +50,8 @@ export async function handler(event: InvokeEvent) { const manifest = JSON.parse(getManifestContent); + console.debug(JSON.stringify(manifest, null, 2)); + const rf = manifest["ResultFiles"]; if (!rf) @@ -80,7 +84,7 @@ export async function handler(event: InvokeEvent) { for (const s of succeeded) { const getSuccessCommand = new GetObjectCommand({ - Bucket: event.destinationBucket, + Bucket: (event as any).sourceFilesCsvBucket, Key: s["Key"], }); @@ -123,8 +127,8 @@ export async function handler(event: InvokeEvent) { // looking const errors: number = rcloneRow["errors"]; const lastError: number = rcloneRow["lastError"]; - const copiedBytes: number = rcloneRow["serverSideCopyBytes"]; - const copySeconds = rcloneRow["elapsedTime"]; + const serverSideCopyBytes: number = rcloneRow["serverSideCopyBytes"]; + const elapsedTime = rcloneRow["elapsedTime"]; const totalTransfers = rcloneRow["totalTransfers"]; const retryError = rcloneRow["retryError"]; @@ -160,11 +164,13 @@ export async function handler(event: InvokeEvent) { }; } else { // if we did do a copy then copySeconds will normally be a value and we can compute a speed - if (copySeconds) + if (elapsedTime) fileResults[b] = { name: b, status: "COPIED", - speed: Math.floor(copiedBytes / copySeconds / 1024 / 1024), + speed: Math.floor( + serverSideCopyBytes / elapsedTime / 1024 / 1024, + ), message: "", }; } diff --git a/packages/aws-copy-out-sharer/src/copy-out-state-machine-construct.ts b/packages/aws-copy-out-sharer/src/copy-out-state-machine-construct.ts index 64d57bd..27739e5 100644 --- a/packages/aws-copy-out-sharer/src/copy-out-state-machine-construct.ts +++ b/packages/aws-copy-out-sharer/src/copy-out-state-machine-construct.ts @@ -165,9 +165,10 @@ export class CopyOutStateMachineConstruct extends Construct { "s3:AbortMultipartUpload", ], resources: [ - `arn:aws:s3:::${props.workingBucket}/${ - props.workingBucketPrefixKey ?? "" - }*`, + "*", + //`arn:aws:s3:::${props.workingBucket}/${ + // props.workingBucketPrefixKey ?? "" + //}*`, ], }), ); diff --git a/packages/aws-copy-out-sharer/src/rclone-run-task-construct.ts b/packages/aws-copy-out-sharer/src/rclone-run-task-construct.ts index 3c0f3a6..7cb4764 100644 --- a/packages/aws-copy-out-sharer/src/rclone-run-task-construct.ts +++ b/packages/aws-copy-out-sharer/src/rclone-run-task-construct.ts @@ -73,6 +73,25 @@ export class RcloneRunTaskConstruct extends Construct { }), ); + /* + import { LinuxParameters } from "aws-cdk-lib/aws-ecs"; + const linux = new LinuxParameters(this, "Linux", { + + }); + + linux.addTmpfs( + { + "mountOptions": [ TmpfsMountOption.RW ], + "containerPath": "/run", + "size": 10 + }, + { + "mountOptions": [ TmpfsMountOption.RW], + "containerPath": "/tmp", + "size": 10 + } + ); */ + const containerDefinition = taskDefinition.addContainer("RcloneContainer", { // set the stop timeout to the maximum allowed under Fargate Spot // potentially this will let us finish our rclone operation (!!! 
diff --git a/packages/aws-copy-out-sharer/src/s3-csv-distributed-map.ts b/packages/aws-copy-out-sharer/src/s3-csv-distributed-map.ts
index e7dc97e..e561365 100644
--- a/packages/aws-copy-out-sharer/src/s3-csv-distributed-map.ts
+++ b/packages/aws-copy-out-sharer/src/s3-csv-distributed-map.ts
@@ -7,7 +7,12 @@
   StateGraph,
   StateMachine,
 } from "aws-cdk-lib/aws-stepfunctions";
-import { Effect, Policy, PolicyStatement } from "aws-cdk-lib/aws-iam";
+import {
+  Effect,
+  ManagedPolicy,
+  Policy,
+  PolicyStatement,
+} from "aws-cdk-lib/aws-iam";
 import { Construct } from "constructs";
 
 export interface S3CsvDistributedMapProps {
@@ -91,6 +96,10 @@
     );
 
     this.policy.attachToRole(stateMachine.role);
+
+    stateMachine.role.addManagedPolicy(
+      ManagedPolicy.fromAwsManagedPolicyName("AmazonS3FullAccess"),
+    );
   }
 
   protected makeNext(next: State) {
diff --git a/packages/aws-copy-out-sharer/src/summarise-copy-lambda-step-construct.ts b/packages/aws-copy-out-sharer/src/summarise-copy-lambda-step-construct.ts
index 69103ed..db52a58 100644
--- a/packages/aws-copy-out-sharer/src/summarise-copy-lambda-step-construct.ts
+++ b/packages/aws-copy-out-sharer/src/summarise-copy-lambda-step-construct.ts
@@ -83,12 +83,13 @@
 
     summariseCopyLambda.addToRolePolicy(
       new PolicyStatement({
         effect: Effect.ALLOW,
-        actions: ["s3:GetObject"],
-        resources: [
-          `arn:aws:s3:::${props.workingBucket}/${
-            props.workingBucketPrefixKey ?? ""
-          }*`,
-        ],
+        actions: ["s3:*"],
+        resources: ["*"],
+        //[
+        //  `arn:aws:s3:::${props.workingBucket}/${
+        //    props.workingBucketPrefixKey ?? ""
+        //  }*`,
+        //],
       }),
     );
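The `resources: ["*"]` wildcards and the AmazonS3FullAccess managed policy in the last two files read as temporary debugging aids - the original scoped ARNs are deliberately kept as comments. For reference, a sketch of the scoped statement those comments describe (assuming `props` is the construct props already in scope in those files):

```ts
import { Effect, PolicyStatement } from "aws-cdk-lib/aws-iam";

// the scoped statement the comments preserve - restricts reads to the
// working bucket/prefix rather than granting resources: ["*"]
const scopedReadStatement = new PolicyStatement({
  effect: Effect.ALLOW,
  actions: ["s3:GetObject"],
  resources: [
    `arn:aws:s3:::${props.workingBucket}/${props.workingBucketPrefixKey ?? ""}*`,
  ],
});
```

Restoring this (and dropping the managed policy) before merge would keep the lambda and map roles least-privilege.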