diff --git a/README.md b/README.md
index 945dbc1..f08991b 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ All you need is an AWS account and the ability to create an AWS role and EC2 ins
 1. Log into your AWS account and access the Identity and Access Management (IAM) service in the AWS Management Console, then choose [**Create Role**](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-service.html) (you can also use the AWS CLI if you prefer)
 2. Select **AWS service** for type of trusted entity
 3. Select **EC2** as the allowed service and use case, then choose **Next: Permissions**
-4. Select the [**AmazonEC2FullAccess**](https://console.aws.amazon.com/iam/home?region=us-east-1#/policies/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAmazonEC2FullAccess) policy or paste [our recommended policy](https://github.com/rpetrich/patrolaroid/tree/main/docs/recommended-iam-policy.md) (with tighter permissions) into [the JSON editor](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create-console.html#access_policies_create-json-editor), then choose **Next: Tags**
+4. Select the [**AmazonEC2FullAccess**](https://console.aws.amazon.com/iam/home?region=us-east-1#/policies/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAmazonEC2FullAccess) and [**AmazonS3FullAccess**](https://console.aws.amazon.com/iam/home?region=us-east-1#/policies/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAmazonS3FullAccess) policies or paste [our recommended policy](https://github.com/rpetrich/patrolaroid/tree/main/docs/recommended-iam-policy.md) (with tighter permissions) into [the JSON editor](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create-console.html#access_policies_create-json-editor), then choose **Next: Tags**
 5. No tags are needed, so select **Next: Review**
 6. Type **Patrolaroid** for the **Role name**
 7. Review the role and, if satisfied, choose **Create role**
diff --git a/docs/recommended-iam-policy.md b/docs/recommended-iam-policy.md
index 20bc1c8..dd8f1c5 100644
--- a/docs/recommended-iam-policy.md
+++ b/docs/recommended-iam-policy.md
@@ -15,7 +15,10 @@ For individuals comfortable applying [custom IAM policies](https://docs.aws.amaz
         "ec2:DeleteVolume",
         "ec2:DescribeSnapshots",
         "ec2:DescribeVolumes",
-        "ec2:DetachVolume"
+        "ec2:DetachVolume",
+        "s3:ListAllMyBuckets",
+        "s3:ListBucket",
+        "s3:GetObject"
       ],
       "Effect": "Allow",
       "Resource": "*"
diff --git a/go.mod b/go.mod
index a0403b8..3e6897d 100644
--- a/go.mod
+++ b/go.mod
@@ -6,6 +6,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/config v1.3.0
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.1.1
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.7.1
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.9.0
 	github.com/capsule8/go-yara v1.1.10-0.20210523225711-dafe562e8c6e
 	github.com/hillu/go-yara/v4 v4.0.6 // indirect
 )
diff --git a/go.sum b/go.sum
index ac2673c..f7ad117 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,14 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.0.0 h1:k7I9E6tyVWBo7H9ffpnxDWudtjau
 github.com/aws/aws-sdk-go-v2/internal/ini v1.0.0/go.mod h1:g3XMXuxvqSMUjnsXXp/960152w0wFS4CXVYgQaSVOHE=
 github.com/aws/aws-sdk-go-v2/service/ec2 v1.7.1 h1:2I6fU3pLkiGOrSRCn8lcftG9Xw57ucxXzf+rOLTR6PY=
 github.com/aws/aws-sdk-go-v2/service/ec2 v1.7.1/go.mod h1:XzzkrryeCoPUd9jxcdDnI2/UmlfIp13nBSpjl2SDSCM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.1.0 h1:XwqxIO9LtNXznBbEMNGumtLN60k4nVqDpVwVWx3XU/o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.1.0/go.mod h1:zdjOOy0ojUn3iNELo6ycIHSMCp4xUbycSHfb8PnbbyM=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.1.1 h1:l7pDLsmOGrnR8LT+3gIv8NlHpUhs7220E457KEC2UM0=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.1.1/go.mod h1:2+ehJPkdIdl46VCj67Emz/EH2hpebHZtaLdzqg+sWOI=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.3.1 h1:VH1Y4k+IZ5kcRVqSNw7eAkXyfS7k2/ibKjrNtbhYhV4=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.3.1/go.mod h1:IpjxfORBAFfkMM0VEx5gPPnEy6WV4Hk0F/+zb/SUWyw=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.9.0 h1:FZ5UL5aiybSJKiJglPT7YMMwc431IgOX5gvlFAzSjzs=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.9.0/go.mod h1:zHCjYoODbYRLz/iFicYswq1gRoxBnHvpY5h2Vg3/tJ4=
 github.com/aws/aws-sdk-go-v2/service/sso v1.2.1 h1:alpXc5UG7al7QnttHe/9hfvUfitV8r3w0onPpPkGzi0=
 github.com/aws/aws-sdk-go-v2/service/sso v1.2.1/go.mod h1:VimPFPltQ/920i1X0Sb0VJBROLIHkDg2MNP10D46OGs=
 github.com/aws/aws-sdk-go-v2/service/sts v1.4.1 h1:9Z00tExoaLutWVDmY6LyvIAcKjHetkbdmpRt4JN/FN0=
diff --git a/main.go b/main.go
index 761f9fa..458c017 100644
--- a/main.go
+++ b/main.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"log"
 	"os"
@@ -17,6 +18,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
 	"github.com/aws/aws-sdk-go-v2/service/ec2"
 	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
 	yara "github.com/capsule8/go-yara"
 )
 
@@ -71,7 +73,8 @@ func run() int {
 	instanceId := string(instanceIdBytes)
 	// parse arguments
 	signaturePathFlag := flag.String("signatures", "./rules", "a path to YARA signatures")
-	volumeIdsFlag := flag.String("volume-ids", "", "a comma separated list of volume IDs to scan")
+	volumeIdsFlag := flag.String("volume-ids", "all", "a comma separated list of volume IDs to scan")
+	bucketIdsFlag := flag.String("bucket-ids", "all", "a comma separated list of bucket IDs to scan")
 	flag.Parse()
 	// load YARA
 	compiler, err := yara.NewCompiler()
@@ -118,45 +121,72 @@ func run() int {
 	if ruleCount == 0 {
 		log.Fatalf("no rules to scan files with; place signatures in ./signatures/*.yar")
 	}
-	// connect to EC2
-	client := ec2.NewFromConfig(cfg)
+	exitCode := 0
 	dryRun := false
 	// search for volumes
-	var volumes []volumeInfo
-	var volumeIds []string
 	if *volumeIdsFlag != "" {
-		volumeIds = strings.Split(*volumeIdsFlag, ",")
-	}
-	var nextToken *string
-	for {
-		volumesOutput, err := client.DescribeVolumes(ctx, &ec2.DescribeVolumesInput{
-			DryRun:    &dryRun,
-			NextToken: nextToken,
-			VolumeIds: volumeIds,
-		})
-		if err != nil {
-			log.Fatalf("describe volumes request failed: %v", err)
+		var volumes []volumeInfo
+		var volumeIds []string
+		if *volumeIdsFlag != "all" {
+			volumeIds = strings.Split(*volumeIdsFlag, ",")
 		}
-		for _, volume := range volumesOutput.Volumes {
-			info := volumeInfo{
-				VolumeId: *volume.VolumeId,
+		// connect to EC2
+		client := ec2.NewFromConfig(cfg)
+		var nextToken *string
+		for {
+			volumesOutput, err := client.DescribeVolumes(ctx, &ec2.DescribeVolumesInput{
+				DryRun:    &dryRun,
+				NextToken: nextToken,
+				VolumeIds: volumeIds,
+			})
+			if err != nil {
+				log.Fatalf("describe volumes request failed: %v", err)
 			}
-			for _, attachment := range volume.Attachments {
-				info.Attachments = append(info.Attachments, *attachment.InstanceId)
+			for _, volume := range volumesOutput.Volumes {
+				info := volumeInfo{
+					VolumeId: *volume.VolumeId,
+				}
+				for _, attachment := range volume.Attachments {
+					info.Attachments = append(info.Attachments, *attachment.InstanceId)
+				}
+				volumes = append(volumes, info)
+				log.Printf("found volume %s", info.VolumeId)
+			}
+			if nextToken = volumesOutput.NextToken; nextToken == nil {
+				break
 			}
-			volumes = append(volumes, info)
-			log.Printf("found volume %s", info.VolumeId)
 		}
-		if nextToken = volumesOutput.NextToken; nextToken == nil {
-			break
+		log.Printf("scanning the following volumes: %v", volumes)
+		for _, volume := range volumes {
+			if err = processVolume(ctx, client, az, instanceId, volume, r); err != nil {
+				log.Printf("%v", err)
+				exitCode = 1
+			}
 		}
+	} else {
+		log.Printf("skipping scanning volumes, none specified")
 	}
-	log.Printf("scanning the following volumes: %v", volumes)
-	exitCode := 0
-	for _, volume := range volumes {
-		if err = processVolume(ctx, client, az, instanceId, volume, r); err != nil {
-			log.Printf("%v", err)
-			exitCode = 1
+	if *bucketIdsFlag != "" {
+		var bucketIds []string
+		client := s3.NewFromConfig(cfg)
+		if *bucketIdsFlag != "all" {
+			bucketIds = strings.Split(*bucketIdsFlag, ",")
+		} else {
+			// connect to S3
+			bucketsOutput, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
+			if err != nil {
+				log.Fatalf("list buckets request failed: %v", err)
+			}
+			for _, bucket := range bucketsOutput.Buckets {
+				bucketIds = append(bucketIds, *bucket.Name)
+			}
+		}
+		log.Printf("scanning the following buckets: %v", bucketIds)
+		for _, bucket := range bucketIds {
+			if err = processBucket(ctx, client, bucket, r); err != nil {
+				log.Printf("%v", err)
+				exitCode = 1
+			}
 		}
 	}
 	return exitCode
@@ -452,12 +482,123 @@ wait_for_volume_detachment:
 	_, err = client.DeleteVolume(ctx, &ec2.DeleteVolumeInput{
 		VolumeId: &snapshotVolumeId,
 	})
+	log.Printf("finished scanning %v", volumeInfo)
 	if err != nil {
 		return fmt.Errorf("delete volume request failed: %v", err)
 	}
 	return errorToReturn
 }
 
+type s3MemoryIterator struct {
+	ctx      context.Context
+	client   *s3.Client
+	bucketId string
+	key      string
+	size     int64
+	offset   int64
+	err      error
+}
+
+func (i *s3MemoryIterator) First() *yara.MemoryBlock {
+	i.offset = 0
+	return i.Next()
+}
+
+func (i *s3MemoryIterator) Next() *yara.MemoryBlock {
+	base := i.offset
+	chunkSize := i.size - base
+	if chunkSize == 0 {
+		return nil
+	}
+	if chunkSize > 2*1024*1024 {
+		chunkSize = 2 * 1024 * 1024
+	}
+	i.offset += chunkSize
+	return &yara.MemoryBlock{
+		Base: uint64(base),
+		Size: uint64(chunkSize),
+		FetchData: func(buf []byte) {
+			rangeString := fmt.Sprintf("bytes=%d-%d", base, base+chunkSize-1)
+			output, err := i.client.GetObject(i.ctx, &s3.GetObjectInput{
+				Bucket: &i.bucketId,
+				Key:    &i.key,
+				Range:  &rangeString,
+			})
+			if err != nil {
+				i.err = err
+			} else {
+				body := output.Body
+				defer body.Close()
+				for len(buf) > 0 {
+					var n int
+					n, err = body.Read(buf)
+					buf = buf[n:]
+					if err != nil {
+						if err != io.EOF {
+							i.err = err
+						}
+						break
+					}
+				}
+			}
+		},
+	}
+}
+
+func processBucket(ctx context.Context, client *s3.Client, bucketId string, rules *yara.Rules) error {
+	var wg sync.WaitGroup
+	pathsToScan := make(chan *s3MemoryIterator, 1024)
+	for i := 0; i < 64; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for iterator := range pathsToScan {
+				// Actually scan the file
+				var m yara.MatchRules
+				if err := rules.ScanMemBlocks(iterator, 0, 0, &m); err != nil {
+					log.Printf("could not scan file in bucket %s at path %q: %v", bucketId, iterator.key, err)
+				} else if iterator.err != nil {
+					log.Printf("could not scan file in bucket %s at path %q: %v", bucketId, iterator.key, iterator.err)
+				} else {
+					// If we have matches, dispatch an alert
+					if len(m) != 0 {
+						for _, match := range m {
+							log.Printf("file in bucket %s at path %q violated rule %q from %q", bucketId, iterator.key, match.Rule, match.Namespace)
+						}
+					}
+				}
+			}
+		}()
+	}
+	log.Printf("scanning bucket %s", bucketId)
+	var continuationToken *string
+	for {
+		listObjectsOutput, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
+			Bucket:            &bucketId,
+			ContinuationToken: continuationToken,
+		})
+		if err != nil {
+			log.Fatalf("list objects request failed: %v", err)
+		}
+		for _, object := range listObjectsOutput.Contents {
+			pathsToScan <- &s3MemoryIterator{
+				ctx:      ctx,
+				client:   client,
+				bucketId: bucketId,
+				key:      *object.Key,
+				size:     object.Size,
+			}
+		}
+		if continuationToken = listObjectsOutput.NextContinuationToken; continuationToken == nil {
+			break
+		}
+	}
+	close(pathsToScan)
+	wg.Wait()
+	log.Printf("finished scanning %s", bucketId)
+	return nil
+}
+
 func main() {
 	os.Exit(run())
 }
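
Note (not part of the patch): the s3MemoryIterator above feeds YARA by fetching the object in 2 MiB windows with ranged GetObject calls. Below is a minimal standalone sketch of that ranged-read pattern using aws-sdk-go-v2, assuming placeholder bucket and key names and reducing error handling to log.Fatalf; it fetches a single window and discards the bytes, purely to illustrate the Range request the iterator issues per chunk.

package main

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("could not load AWS config: %v", err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket and key; substitute real values before running.
	bucket := "example-bucket"
	key := "example/object"

	// Request a single 2 MiB window of the object. HTTP byte ranges are
	// inclusive, so the final offset in the header is chunkSize-1.
	const chunkSize = 2 * 1024 * 1024
	rangeHeader := fmt.Sprintf("bytes=%d-%d", 0, chunkSize-1)
	output, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: &bucket,
		Key:    &key,
		Range:  &rangeHeader,
	})
	if err != nil {
		log.Fatalf("ranged GetObject failed: %v", err)
	}
	defer output.Body.Close()

	// Drain the chunk; the real iterator copies it into the buffer YARA provides.
	n, err := io.Copy(ioutil.Discard, output.Body)
	if err != nil {
		log.Fatalf("reading object chunk failed: %v", err)
	}
	log.Printf("fetched %d bytes of s3://%s/%s", n, bucket, key)
}

The iterator in the patch repeats this request with an advancing base offset until it reaches the object size reported by ListObjectsV2, which is why it never has to buffer more than one 2 MiB block per worker.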