diff --git a/.github/workflows/PR-build.yml b/.github/workflows/PR-build.yml new file mode 100644 index 0000000000..50f45f310f --- /dev/null +++ b/.github/workflows/PR-build.yml @@ -0,0 +1,109 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: PR Build +on: + push: + branches: + - main + paths-ignore: + - '**/*.md' + - 'NOTICE' + - 'RELEASE_NOTES' + - 'THIRD-PARTY' + - 'LICENSE' + - '.github/**' + - '!.github/workflows/PR-build.yml' + + pull_request: + branches: + - main + types: + - opened + - synchronize + - reopened + - ready_for_review + paths-ignore: + - '**/*.md' + - 'NOTICE' + - 'RELEASE_NOTES' + - 'THIRD-PARTY' + - 'LICENSE' + - '.github/**' + - '!.github/workflows/PR-build.yml' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + build: + name: Build ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, windows-2019, windows-latest, macos-latest] + include: + - os: ubuntu-latest + family: linux + cache-path: | + ~/.cache/go-build + ~/go/pkg/mod + - os: macos-latest + family: darwin + cache-path: | + ~/Library/Caches/go-build + ~/go/pkg/mod + - os: windows-2019 + family: windows + cache-path: | + ~\AppData\Local\go-build + ~\go\pkg\mod + - os: windows-latest + family: windows + cache-path: | + ~\AppData\Local\go-build + ~\go\pkg\mod + steps: + - name: Set up Go 1.x + uses: actions/setup-go@v4 + with: + go-version: ~1.19.6 + cache: false + + - name: Check out code + uses: actions/checkout@v3 + + - name: Cache binaries + id: cached_binaries + uses: actions/cache@v3 + with: + key: "cached-binaries-${{ matrix.os }}-${{ github.sha }}" + path: go.mod + + - name: Cache build output + if: steps.cached_binaries.outputs.cache-hit != 'true' + uses: actions/cache@v3 + with: + path: ${{ matrix.cache-path }} + key: v1-go-pkg-mod-${{ matrix.os }}-${{ hashFiles('**/go.sum') }} + + - name: Install make 
+ if: matrix.family == 'windows' && steps.cached_binaries.outputs.cache-hit != 'true' + run: choco install make + + - name: Unit Test + if: steps.cached_binaries.outputs.cache-hit != 'true' + run: make test + + - name: Upload coverage to Codecov + if: steps.cached_binaries.outputs.cache-hit != 'true' + uses: codecov/codecov-action@v3 + with: + verbose: true + + - name: Build + if: steps.cached_binaries.outputs.cache-hit != 'true' + run: make amazon-cloudwatch-agent-${{ matrix.family }} + \ No newline at end of file diff --git a/.github/workflows/build-test-linux.yml b/.github/workflows/build-test-linux.yml deleted file mode 100644 index cbfc723daa..0000000000 --- a/.github/workflows/build-test-linux.yml +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: build-and-test-linux -on: - push: - branches: - - main - paths-ignore: - - '**/*.md' - - 'NOTICE' - - 'RELEASE_NOTES' - - 'THIRD-PARTY' - - 'LICENSE' - - '.github/**' - - '!.github/workflows/build-*' - - - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }} - cancel-in-progress: true - -jobs: - linux-unittest: - runs-on: ubuntu-latest - steps: - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.19.2 - cache: false - - - name: Check out code - uses: actions/checkout@v3 - with: - fetch-depth: 0 - submodules: 'true' - - # This requires the go mod tidy to not look at the go proxy - - name: Go proxy direct - run: GOPROXY=direct - - - uses: zencargo/github-action-go-mod-tidy@v1 - with: - go-version: 1.19.2 - - - name: Cache build output - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - - name: Test - run: make test - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - 
verbose: true - - - name: Build - run: make build \ No newline at end of file diff --git a/.github/workflows/build-test-macos.yml b/.github/workflows/build-test-macos.yml deleted file mode 100644 index f32138d259..0000000000 --- a/.github/workflows/build-test-macos.yml +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: build-and-test-macos -on: - push: - branches: - - main - paths-ignore: - - '**/*.md' - - 'NOTICE' - - 'RELEASE_NOTES' - - 'THIRD-PARTY' - - 'LICENSE' - - '.github/**' - - '!.github/workflows/build-*' - - - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }} - cancel-in-progress: true - -jobs: - macos-unittest: - runs-on: macos-11 - steps: - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.19.2 - cache: false - - - name: Check out code - uses: actions/checkout@v3 - with: - fetch-depth: 0 - submodules: 'true' - - - name: Cache build output - uses: actions/cache@v3 - with: - path: | - ~/Library/Caches/go-build - ~/go/pkg/mod - key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - - name: Test - run: make test - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - verbose: true - - - name: Build - run: make build \ No newline at end of file diff --git a/.github/workflows/build-test-windows.yml b/.github/workflows/build-test-windows.yml deleted file mode 100644 index 3ec0220a0b..0000000000 --- a/.github/workflows/build-test-windows.yml +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: build-and-test-windows -on: - push: - branches: - - main - paths-ignore: - - '**/*.md' - - 'NOTICE' - - 'RELEASE_NOTES' - - 'THIRD-PARTY' - - 'LICENSE' - - '.github/**' - - '!.github/workflows/build-*' - - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }} - cancel-in-progress: true - -jobs: - windows-unittest: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ windows-2019, windows-latest ] - steps: - - name: Checkout Repo - uses: actions/checkout@v3 - - - name: Setup Go - uses: actions/setup-go@v4 - with: - go-version: ~1.19.2 - cache: false - - - name: Cache Go - uses: actions/cache@v3 - with: - path: | - %LocalAppData%\go-build - ~/go/pkg/mod - key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - - name: Install make - run: choco install make - - - name: Run Unit tests - run: make test - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - verbose: true - - - name: Run build - run: make build \ No newline at end of file diff --git a/.github/workflows/integrationTest.yml b/.github/workflows/integration-test.yml similarity index 81% rename from .github/workflows/integrationTest.yml rename to .github/workflows/integration-test.yml index 67161acf7f..8722e48243 100644 --- a/.github/workflows/integrationTest.yml +++ b/.github/workflows/integration-test.yml @@ -22,6 +22,8 @@ on: - 'RELEASE_NOTES' - 'THIRD-PARTY' - 'LICENSE' + - '.github/**' + - '!.github/workflows/integration-test.yml' workflow_dispatch: inputs: plugins: @@ -50,7 +52,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.19.2 + go-version: ~1.19.6 cache: false - name: Install rpm @@ -62,15 +64,6 @@ jobs: role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} aws-region: us-west-2 - - name: Cache go - id: cached_go - uses: actions/cache@v3 - with: - path: | - ~/go/pkg/mod - 
~/.cache/go-build - key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - name: Cache binaries id: cached_binaries uses: actions/cache@v3 @@ -78,7 +71,17 @@ jobs: key: "cached_binaries_${{ github.sha }}" path: go.mod + - name: Cache go + if: steps.cached_binaries.outputs.cache-hit != 'true' + uses: actions/cache@v2 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + - name: Import GPG Key + if: steps.cached_binaries.outputs.cache-hit != 'true' uses: crazy-max/ghaction-import-gpg@v5 with: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} @@ -86,9 +89,10 @@ jobs: - name: Build Binaries if: steps.cached_binaries.outputs.cache-hit != 'true' - run: make build package-rpm package-deb package-win + run: make amazon-cloudwatch-agent-linux amazon-cloudwatch-agent-windows package-rpm package-deb package-win - name: Sign Build Files + if: steps.cached_binaries.outputs.cache-hit != 'true' run: for f in $(find build/bin/); do if [ ! 
-d $f ]; then echo "Signing file $f" && gpg --detach-sign $f ; fi ; done @@ -129,13 +133,14 @@ jobs: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}:${{ github.sha }} platforms: linux/amd64, linux/arm64 - GenerateTestMatrix: name: 'GenerateTestMatrix' runs-on: ubuntu-latest outputs: ec2_gpu_matrix: ${{ steps.set-matrix.outputs.ec2_gpu_matrix }} ec2_linux_matrix: ${{ steps.set-matrix.outputs.ec2_linux_matrix }} + ec2_windows_matrix: ${{ steps.set-matrix.outputs.ec2_windows_matrix }} + ec2_mac_matrix: ${{ steps.set-matrix.outputs.ec2_mac_matrix }} ec2_performance_matrix: ${{steps.set-matrix.outputs.ec2_performance_matrix}} ec2_stress_matrix: ${{steps.set-matrix.outputs.ec2_stress_matrix}} ecs_ec2_launch_daemon_matrix: ${{ steps.set-matrix.outputs.ecs_ec2_launch_daemon_matrix }} @@ -149,7 +154,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.19.2 + go-version: ~1.19.6 - name: Generate matrix id: set-matrix @@ -157,18 +162,24 @@ jobs: go run --tags=generator generator/test_case_generator.go echo "::set-output name=ec2_gpu_matrix::$(echo $(cat generator/resources/ec2_gpu_complete_test_matrix.json))" echo "::set-output name=ec2_linux_matrix::$(echo $(cat generator/resources/ec2_linux_complete_test_matrix.json))" + echo "::set-output name=ec2_windows_matrix::$(echo $(cat generator/resources/ec2_windows_complete_test_matrix.json))" + echo "::set-output name=ec2_mac_matrix::$(echo $(cat generator/resources/ec2_mac_complete_test_matrix.json))" echo "::set-output name=ec2_performance_matrix::$(echo $(cat generator/resources/ec2_performance_complete_test_matrix.json))" echo "::set-output name=ec2_stress_matrix::$(echo $(cat generator/resources/ec2_stress_complete_test_matrix.json))" echo "::set-output name=ecs_ec2_launch_daemon_matrix::$(echo $(cat generator/resources/ecs_ec2_daemon_complete_test_matrix.json))" echo "::set-output name=ecs_fargate_matrix::$(echo $(cat 
generator/resources/ecs_fargate_complete_test_matrix.json))" + - name: Echo test plan matrix run: | echo "ec2_gpu_matrix: ${{ steps.set-matrix.outputs.ec2_gpu_matrix }}" echo "ec2_linux_matrix: ${{ steps.set-matrix.outputs.ec2_linux_matrix }}" + echo "ec2_windows_matrix: ${{ steps.set-matrix.outputs.ec2_windows_matrix }}" + echo "ec2_mac_matrix: ${{ steps.set-matrix.outputs.ec2_mac_matrix }}" echo "ec2_performance_matrix: ${{ steps.set-matrix.outputs.ec2_performance_matrix}}" echo "ec2_stress_matrix: ${{ steps.set-matrix.outputs.ec2_stress_matrix}}" echo "ecs_ec2_launch_daemon_matrix${{ steps.set-matrix.outputs.ecs_ec2_launch_daemon_matrix }}" echo "ecs_fargate_matrix${{ steps.set-matrix.outputs.ecs_fargate_matrix }}" + MakeMSIZip: name: 'MakeMSIZip' runs-on: ubuntu-latest @@ -184,7 +195,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.19.2 + go-version: ~1.19.6 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -216,13 +227,15 @@ jobs: mkdir msi_dep cp -r msi/tools/. msi_dep/ cp -r windows-agent/amazon-cloudwatch-agent/. 
msi_dep/ - go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/amazon-cloudwatch-agent.wxs '' --tags=integration - go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/manifest.json __VERSION__ --tags=integration + go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/amazon-cloudwatch-agent.wxs '' + go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/manifest.json __VERSION__ + - name: Zip if: steps.cached_win_zip.outputs.cache-hit != 'true' run: | sudo apt install zip zip buildMSI.zip msi_dep/* + - name: Upload zip if: steps.cached_win_zip.outputs.cache-hit != 'true' run: aws s3 cp buildMSI.zip s3://${S3_INTEGRATION_BUCKET}/integration-test/packaging/${{ github.sha }}/buildMSI.zip @@ -238,6 +251,7 @@ jobs: with: path: cwa fetch-depth: 0 + - uses: actions/checkout@v3 with: repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} @@ -246,7 +260,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.19.2 + go-version: ~1.19.6 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -254,20 +268,29 @@ jobs: role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} aws-region: us-west-2 - - name: Cache pkg - id: cached_pkg + - name: Cache binaries + id: cached_binaries uses: actions/cache@v3 with: - key: "cached_pkg_${{ github.sha }}" + key: "cached-binaries-${{ runner.os }}-${{ github.sha }}" path: go.mod + - name: Cache pkg + if: steps.cached_binaries.outputs.cache-hit != 'true' + uses: actions/cache@v2 + with: + path: | + ~/Library/Caches/go-build + ~/go/pkg/mod + key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + - name: Build Binaries - if: steps.cached_pkg.outputs.cache-hit != 'true' + if: steps.cached_binaries.outputs.cache-hit != 'true' working-directory: cwa - run: make build package-darwin + run: make amazon-cloudwatch-agent-mac package-darwin - name: Copy binary - if: steps.cached_pkg.outputs.cache-hit != 'true' + if: 
steps.cached_binaries.outputs.cache-hit != 'true' working-directory: cwa run: | echo cw agent version $(cat CWAGENT_VERSION) @@ -276,14 +299,14 @@ jobs: cp build/bin/CWAGENT_VERSION /tmp/CWAGENT_VERSION - name: Create pkg dep folder and copy deps - if: steps.cached_pkg.outputs.cache-hit != 'true' + if: steps.cached_binaries.outputs.cache-hit != 'true' working-directory: test run: | cp -r pkg/tools/. /tmp/ cp -r pkg/tools/. /tmp/arm64/ - name: Build And Upload PKG - if: steps.cached_pkg.outputs.cache-hit != 'true' + if: steps.cached_binaries.outputs.cache-hit != 'true' working-directory: /tmp/ run : | chmod +x create_pkg.sh @@ -330,6 +353,7 @@ jobs: Expand-Archive buildMSI.zip -Force cd buildMSI/msi_dep .\create_msi.ps1 ${{ github.sha }} ${{ secrets.S3_INTEGRATION_BUCKET }} + #GH actions set up gpg only works on ubuntu as of this commit date GPGSignMacAndWindowsPackage: name: 'SignMacAndWindowsPackage' @@ -377,6 +401,7 @@ jobs: aws s3 cp packages/amazon-cloudwatch-agent.msi.sig s3://${S3_INTEGRATION_BUCKET}/integration-test/packaging/${{ github.sha }}/amazon-cloudwatch-agent.msi.sig aws s3 cp packages/amd64/amazon-cloudwatch-agent.pkg.sig s3://${S3_INTEGRATION_BUCKET}/integration-test/packaging/${{ github.sha }}/amd64/amazon-cloudwatch-agent.pkg.sig aws s3 cp packages/arm64/amazon-cloudwatch-agent.pkg.sig s3://${S3_INTEGRATION_BUCKET}/integration-test/packaging/${{ github.sha }}/arm64/amazon-cloudwatch-agent.pkg.sig + StartLocalStack: name: 'StartLocalStack' runs-on: ubuntu-latest @@ -420,6 +445,7 @@ jobs: echo $LOCAL_STACK_HOST_NAME && echo "::set-output name=local_stack_host_name::$LOCAL_STACK_HOST_NAME" && aws s3 cp terraform.tfstate s3://${S3_INTEGRATION_BUCKET}/integration-test/local-stack-terraform-state/${GITHUB_SHA}/terraform.tfstate + EC2NvidiaGPUIntegrationTest: needs: [ MakeBinary, BuildMSI, StartLocalStack, GenerateTestMatrix ] name: 'EC2NVIDIAGPUIntegrationTest' @@ -480,7 +506,6 @@ jobs: -var="local_stack_host_name=${{ 
needs.StartLocalStack.outputs.local_stack_host_name }}" \ -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ -var="ssh_key_name=${KEY_NAME}" \ - -var="test_name=cw-integ-test-${{ matrix.arrays.os }}" \ -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve else terraform destroy -auto-approve && exit 1 @@ -520,6 +545,7 @@ jobs: cd terraform/ec2/linux fi terraform destroy --auto-approve + EC2LinuxIntegrationTest: needs: [MakeBinary, StartLocalStack, GenerateTestMatrix] name: 'Test' @@ -581,7 +607,6 @@ jobs: -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ -var="plugin_tests='${{ github.event.inputs.plugins }}'" \ -var="ssh_key_name=${KEY_NAME}" \ - -var="test_name=${{ matrix.arrays.os }}" \ -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve else terraform destroy -auto-approve && exit 1 @@ -596,71 +621,142 @@ jobs: retry_wait_seconds: 5 command: cd terraform/ec2/linux && terraform destroy --auto-approve -# @TODO add back when we add back windows tests -# EC2WinIntegrationTest: -# needs: [BuildMSI, GenerateTestMatrix] -# name: 'EC2WinIntegrationTest' -# runs-on: ubuntu-latest -# strategy: -# fail-fast: false -# matrix: -# arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_matrix) }} -# permissions: -# id-token: write -# contents: read -# steps: -# - uses: actions/checkout@v2 -# -# - name: Configure AWS Credentials -# uses: aws-actions/configure-aws-credentials@v1 -# with: -# role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} -# aws-region: us-west-2 -# -# - name: Cache if success -# id: ec2-win-integration-test -# uses: actions/cache@v3 -# with: -# path: go.mod -# key: ec2-win-integration-test-${{ github.sha }}-${{ matrix.arrays.os }} -# -# - name: Echo OS -# run: echo run on ec2 instance os ${{ matrix.arrays.os }} -# -# - name: Verify Terraform version -# run: terraform --version -# -# # nick-fields/retry@v2 starts at base dir -# - name: Terraform apply -# if: 
steps.ec2-win-integration-test.outputs.cache-hit != 'true' -# uses: nick-fields/retry@v2 -# with: -# max_attempts: 3 -# timeout_minutes: 15 -# retry_wait_seconds: 5 -# command: | -# cd integration/terraform/ec2/win -# terraform init -# if terraform apply --auto-approve \ -# -var="ssh_key_value=${PRIVATE_KEY}" -var="ssh_key_name=${KEY_NAME}" \ -# -var="github_repo=${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" \ -# -var="cwa_github_sha=${GITHUB_SHA}" -var="ami=${{ matrix.arrays.ami }}" \ -# -var="test_dir=${{ matrix.arrays.test_dir }}" \ -# -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then -# terraform destroy -auto-approve -# else -# terraform destroy -auto-approve && exit 1 -# fi -# -# #This is here just in case workflow cancel -# - name: Terraform destroy -# if: ${{ cancelled() && steps.ec2-win-integration-test.outputs.cache-hit != 'true' }} -# uses: nick-fields/retry@v2 -# with: -# max_attempts: 3 -# timeout_minutes: 8 -# retry_wait_seconds: 5 -# command: cd integration/terraform/ec2/win && terraform destroy --auto-approve -var="ami=${{ matrix.arrays.ami }}" + EC2WinIntegrationTest: + needs: [BuildMSI, GenerateTestMatrix] + name: 'EC2WinIntegrationTest' + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v2 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache if success + id: ec2-win-integration-test + uses: actions/cache@v2 + with: + path: go.mod + key: ec2-win-integration-test-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.arc }}-${{ matrix.arrays.test_dir }} + + - name: Echo OS + run: echo run on ec2 instance os ${{ matrix.arrays.os }} 
+ + - name: Verify Terraform version + run: terraform --version + + # nick-fields/retry@v2 starts at base dir + - name: Terraform apply + if: steps.ec2-win-integration-test.outputs.cache-hit != 'true' + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 30 + retry_wait_seconds: 5 + command: | + cd terraform/ec2/win + terraform init + if terraform apply --auto-approve \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="ssh_key_value=${PRIVATE_KEY}" -var="ssh_key_name=${KEY_NAME}" \ + -var="cwa_github_sha=${GITHUB_SHA}" \ + -var="test_dir=${{ matrix.arrays.test_dir }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() && steps.ec2-win-integration-test.outputs.cache-hit != 'true' }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/ec2/win && terraform destroy --auto-approve + + EC2DarwinIntegrationTest: + needs: [MakeMacPkg, EC2LinuxIntegrationTest, GenerateTestMatrix] + name: 'EC2DarwinIntegrationTest' + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_mac_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v2 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache if success + id: ec2-mac-integration-test + uses: actions/cache@v2 + with: + path: go.mod + key: ec2-mac-integration-test-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.arc }}-${{ matrix.arrays.test_dir }} + + - name: Echo OS + run: echo run 
on ec2 instance os ${{ matrix.arrays.os }} + + - name: Verify Terraform version + run: terraform --version + + # nick-fields/retry@v2 starts at base dir + - name: Terraform apply + if: steps.ec2-mac-integration-test.outputs.cache-hit != 'true' + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 30 + retry_wait_seconds: 5 + command: | + cd terraform/ec2/mac + terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" -var="ssh_key_name=${KEY_NAME}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="cwa_github_sha=${GITHUB_SHA}" -var="ami=${{ matrix.arrays.ami }}" \ + -var="test_dir=${{ matrix.arrays.test_dir }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() && steps.ec2-mac-integration-test.outputs.cache-hit != 'true' }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/ec2/mac && terraform destroy --auto-approve StopLocalStack: name: 'StopLocalStack' @@ -720,17 +816,24 @@ jobs: role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} aws-region: us-west-2 + - name: Cache if success + id: ecs-ec2-integration-test + uses: actions/cache@v2 + with: + path: go.mod + key: ecs-ec2-integration-test-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.test_dir }} + - name: Login ECR id: login-ecr - if: steps.ecs-ec2-launch-daemon-integration-test.outputs.cache-hit != 'true' + if: steps.ecs-ec2-integration-test.outputs.cache-hit != 'true' uses: aws-actions/amazon-ecr-login@v1 - name: Verify Terraform version - if: steps.ecs-ec2-launch-daemon-integration-test.outputs.cache-hit != 'true' + if: steps.ecs-ec2-integration-test.outputs.cache-hit != 'true' run: terraform --version - name: Terraform 
apply - if: steps.ecs-ec2-launch-daemon-integration-test.outputs.cache-hit != 'true' + if: steps.ecs-ec2-integration-test.outputs.cache-hit != 'true' uses: nick-fields/retry@v2 with: max_attempts: 3 @@ -741,6 +844,7 @@ jobs: terraform init if terraform apply --auto-approve\ -var="test_dir=${{ matrix.arrays.test_dir }}"\ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}"\ -var="cwagent_image_tag=${{ github.sha }}"\ -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ @@ -749,8 +853,9 @@ jobs: else terraform destroy -auto-approve && exit 1 fi + - name: Terraform destroy - if: ${{ cancelled() && steps.ecs-ec2-launch-integration-test.outputs.cache-hit != 'true' }} + if: ${{ cancelled() && steps.ecs-ec2-integration-test.outputs.cache-hit != 'true' }} uses: nick-fields/retry@v2 with: max_attempts: 3 @@ -869,16 +974,13 @@ jobs: cd terraform/performance terraform init if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ - -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ - -var="cwa_github_sha=${GITHUB_SHA}" -var="install_agent=${{ matrix.arrays.installAgentCommand }}" \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="cwa_github_sha=${GITHUB_SHA}" \ -var="user=${{ matrix.arrays.username }}" \ -var="ami=${{ matrix.arrays.ami }}" \ -var="arc=${{ matrix.arrays.arc }}" \ - -var="binary_name=${{ matrix.arrays.binaryName }}" \ -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ -var="ssh_key_name=${KEY_NAME}" \ - -var="test_name=${{ matrix.arrays.os }}" \ -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve else @@ -947,17 +1049,13 @@ jobs: # terraform init # if terraform apply --auto-approve \ # -var="ssh_key_value=${PRIVATE_KEY}" 
-var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ -# -var="cwa_github_sha=${GITHUB_SHA}" -var="install_agent=${{ matrix.arrays.installAgentCommand }}" \ +# -var="cwa_github_sha=${GITHUB_SHA}" \ # -var="user=${{ matrix.arrays.username }}" \ # -var="ami=${{ matrix.arrays.ami }}" \ -# -var="ca_cert_path=${{ matrix.arrays.caCertPath }}" \ # -var="arc=${{ matrix.arrays.arc }}" \ -# -var="binary_name=${{ matrix.arrays.binaryName }}" \ -# -var="local_stack_host_name=${{ needs.StartLocalStack.outputs.local_stack_host_name }}" \ # -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ # -var="ssh_key_name=${KEY_NAME}" \ # -var="cwa_github_sha_date=${{ steps.sha_date.outputs.sha_date }}" \ -# -var="test_name=${{ matrix.arrays.os }}" \ # -var="performance_number_of_logs=${{ matrix.arrays.performance_number_of_logs}}"\ # -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve # else diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index d029eaf570..efe0d7a4c0 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.19.2 + go-version: ~1.19.6 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -36,6 +36,14 @@ jobs: fetch-depth: 0 submodules: 'true' + - name: Cache build output + uses: actions/cache@v3 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + - name: Release run: make nightly-release @@ -60,7 +68,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.19.2 + go-version: ~1.19.6 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -68,6 +76,15 @@ jobs: role-to-assume: ${{ secrets.S3_AWS_ASSUME_ROLE }} aws-region: us-east-1 + - name: Cache build output + uses: actions/cache@v3 + with: + path: | + ~/Library/Caches/go-build 
+ ~/go/pkg/mod + key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + + - name: Check out code uses: actions/checkout@v3 with: diff --git a/.github/workflows/releaseTest.yml b/.github/workflows/releaseTest.yml deleted file mode 100644 index a50cb72bdb..0000000000 --- a/.github/workflows/releaseTest.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: Release Update -env: - PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} - TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} - KEY_NAME: ${{ secrets.KEY_NAME }} - VPC_SECURITY_GROUPS_IDS: ${{ secrets.VPC_SECURITY_GROUPS_IDS }} - IAM_ROLE: ${{ secrets.IAM_ROLE }} - GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - PASSPHRASE: ${{ secrets.PASSPHRASE }} - GPG_KEY_NAME: ${{ secrets.GPG_KEY_NAME }} - GPG_TTY: $(tty) - -on: - release: - types: [created] - - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }} - cancel-in-progress: true - -jobs: - UpdatePerformanceMetrics: - name: "UpdatePerformanceMetrics" - runs-on: ubuntu-latest - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.19.2 - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - name: Get Release Tag - run: echo ${{ github.event.release.tag_name }} - - name: Update isRelease for this release - run: | - cd integration/test/performancetest - export IS_RELEASE=true - export SHA=$GITHUB_SHA - export RELEASE_NAME=${{ github.event.release.tag_name }} - go test -run TestUpdateCommit -p 1 -v --tags=integration diff --git a/Makefile b/Makefile index 9bc4f470a0..95a214061f 100644 --- a/Makefile +++ b/Makefile @@ -22,16 
+22,8 @@ LDFLAGS += -X github.com/aws/amazon-cloudwatch-agent/cfg/agentinfo.BuildStr=${B LINUX_AMD64_BUILD = CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -trimpath -buildmode=${CWAGENT_BUILD_MODE} -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/linux_amd64 LINUX_ARM64_BUILD = CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -trimpath -buildmode=${CWAGENT_BUILD_MODE} -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/linux_arm64 WIN_BUILD = GOOS=windows GOARCH=amd64 go build -trimpath -buildmode=${CWAGENT_BUILD_MODE} -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/windows_amd64 -DARWIN_BUILD = GO111MODULE=on GOOS=darwin GOARCH=amd64 go build -trimpath -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/darwin_amd64 -DARWIN_ARM_BUILD_FLAG = GO111MODULE=on GOOS=darwin GOARCH=arm64 -ifeq ($(OS),Windows_NT) -else - UNAME_S := $(shell uname -s) - ifeq ($(UNAME_S),Darwin) - DARWIN_ARM_BUILD_FLAG += CGO_ENABLED=1 - endif -endif -DARWIN_BUILD_ARM64 = ${DARWIN_ARM_BUILD_FLAG} go build -trimpath -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/darwin_arm64 +DARWIN_BUILD_AMD64 = CGO_ENABLED=1 GO111MODULE=on GOOS=darwin GOARCH=amd64 go build -trimpath -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/darwin_amd64 +DARWIN_BUILD_ARM64 = CGO_ENABLED=1 GO111MODULE=on GOOS=darwin GOARCH=arm64 go build -trimpath -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/darwin_arm64 IMAGE = amazon/cloudwatch-agent:$(VERSION) DOCKER_BUILD_FROM_SOURCE = docker build -t $(IMAGE) -f ./amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile @@ -51,7 +43,7 @@ release: prepackage package-rpm package-deb package-win package-darwin nightly-release: prepackage package-rpm package-deb package-win nightly-release-mac: prepackage package-darwin -build: check_secrets amazon-cloudwatch-agent config-translator start-amazon-cloudwatch-agent amazon-cloudwatch-agent-config-wizard config-downloader +build: check_secrets amazon-cloudwatch-agent-linux amazon-cloudwatch-agent-darwin amazon-cloudwatch-agent-windows 
check_secrets:: if grep --exclude-dir=build --exclude-dir=vendor -exclude=integration/msi/tools/amazon-cloudwatch-agent.wxs -E "(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}|(\"|')?(AWS|aws|Aws)?_?(SECRET|secret|Secret)?_?(ACCESS|access|Access)?_?(KEY|key|Key)(\"|')?\\s*(:|=>|=)\\s*(\"|')?[A-Za-z0-9/\\+=]{40}(\"|')?" -Rn .; then echo "check_secrets failed"; exit 1; fi; @@ -65,45 +57,44 @@ copy-version-file: create-version-file mkdir -p build/bin/ cp CWAGENT_VERSION $(BUILD_SPACE)/bin/CWAGENT_VERSION -amazon-cloudwatch-agent: copy-version-file - @echo Building amazon-cloudwatch-agent - $(LINUX_AMD64_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent - $(LINUX_ARM64_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent - $(WIN_BUILD)/amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent - $(DARWIN_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent - $(DARWIN_BUILD_ARM64)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent - -config-translator: copy-version-file - @echo Building config-translator +amazon-cloudwatch-agent-linux: copy-version-file + @echo Building CloudWatchAgent for Linux,Debian with ARM64 and AMD64 + $(LINUX_AMD64_BUILD)/config-downloader github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader + $(LINUX_ARM64_BUILD)/config-downloader github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader $(LINUX_AMD64_BUILD)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator $(LINUX_ARM64_BUILD)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator - $(WIN_BUILD)/config-translator.exe github.com/aws/amazon-cloudwatch-agent/cmd/config-translator - $(DARWIN_BUILD)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator - 
$(DARWIN_BUILD_ARM64)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator - -start-amazon-cloudwatch-agent: copy-version-file - @echo Building start-amazon-cloudwatch-agent + $(LINUX_AMD64_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent + $(LINUX_ARM64_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent $(LINUX_AMD64_BUILD)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent $(LINUX_ARM64_BUILD)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent - $(WIN_BUILD)/start-amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent - $(DARWIN_BUILD)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent - $(DARWIN_BUILD_ARM64)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent - -amazon-cloudwatch-agent-config-wizard: copy-version-file - @echo Building amazon-cloudwatch-agent-config-wizard $(LINUX_AMD64_BUILD)/amazon-cloudwatch-agent-config-wizard github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard $(LINUX_ARM64_BUILD)/amazon-cloudwatch-agent-config-wizard github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard - $(WIN_BUILD)/amazon-cloudwatch-agent-config-wizard.exe github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard - $(DARWIN_BUILD)/amazon-cloudwatch-agent-config-wizard github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard - $(DARWIN_BUILD_ARM64)/amazon-cloudwatch-agent-config-wizard github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard -config-downloader: copy-version-file - @echo Building config-downloader - $(LINUX_AMD64_BUILD)/config-downloader 
github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader - $(LINUX_ARM64_BUILD)/config-downloader github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader - $(WIN_BUILD)/config-downloader.exe github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader - $(DARWIN_BUILD)/config-downloader github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader + +amazon-cloudwatch-agent-darwin: copy-version-file +ifneq ($(OS),Windows_NT) +ifeq ($(shell uname -s),Darwin) + @echo Building CloudWatchAgent for MacOS with ARM64 and AMD64 + $(DARWIN_BUILD_AMD64)/config-downloader github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader $(DARWIN_BUILD_ARM64)/config-downloader github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader + $(DARWIN_BUILD_AMD64)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator + $(DARWIN_BUILD_ARM64)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator + $(DARWIN_BUILD_AMD64)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent + $(DARWIN_BUILD_ARM64)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent + $(DARWIN_BUILD_AMD64)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent + $(DARWIN_BUILD_ARM64)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent + $(DARWIN_BUILD_AMD64)/amazon-cloudwatch-agent-config-wizard github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard + $(DARWIN_BUILD_ARM64)/amazon-cloudwatch-agent-config-wizard github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard +endif +endif + +amazon-cloudwatch-agent-windows: + @echo Building CloudWatchAgent for Windows with AMD64 + $(WIN_BUILD)/config-downloader.exe github.com/aws/amazon-cloudwatch-agent/cmd/config-downloader + $(WIN_BUILD)/config-translator.exe 
github.com/aws/amazon-cloudwatch-agent/cmd/config-translator + $(WIN_BUILD)/amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent + $(WIN_BUILD)/start-amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent + $(WIN_BUILD)/amazon-cloudwatch-agent-config-wizard.exe github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent-config-wizard # A fast build that only builds amd64, we don't need wizard and config downloader build-for-docker: build-for-docker-amd64 @@ -136,7 +127,8 @@ lint: install-tools ${LINTER} run ./... test: - CGO_ENABLED=0 go test -timeout 15m -coverprofile coverage.txt -failfast ./awscsm/... ./cfg/... ./cmd/... ./handlers/... ./internal/... ./logger/... ./logs/... ./metric/... ./plugins/... ./profiler/... ./tool/... ./translator/... + CGO_ENABLED=0 go test -coverprofile coverage.txt -failfast ./awscsm/... ./cfg/... ./cmd/... ./handlers/... ./internal/... ./logger/... ./logs/... ./metric/... ./plugins/... ./profiler/... ./tool/... ./translator/... 
+ clean:: rm -rf release/ build/ rm -f CWAGENT_VERSION diff --git a/plugins/inputs/logfile/tailersrc_test.go b/plugins/inputs/logfile/tailersrc_test.go index 28df953778..0ceb74c01b 100644 --- a/plugins/inputs/logfile/tailersrc_test.go +++ b/plugins/inputs/logfile/tailersrc_test.go @@ -16,11 +16,12 @@ import ( "testing" "time" - "github.com/aws/amazon-cloudwatch-agent/profiler" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" + "github.com/aws/amazon-cloudwatch-agent/profiler" ) type tailerTestResources struct { @@ -36,15 +37,11 @@ func TestTailerSrc(t *testing.T) { file, err := createTempFile("", "tailsrctest-*.log") defer os.Remove(file.Name()) - if err != nil { - t.Errorf("Failed to create temp file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create temp file: %v", err)) statefile, err := os.CreateTemp("", "tailsrctest-state-*.log") defer os.Remove(statefile.Name()) - if err != nil { - t.Errorf("Failed to create temp file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create temp file: %v", err)) beforeCount := tail.OpenFileCount.Load() tailer, err := tail.TailFile(file.Name(), tail.Config{ @@ -58,11 +55,8 @@ func TestTailerSrc(t *testing.T) { IsUTF16: false, }) - if err != nil { - t.Errorf("Failed to create tailer src for file %v with error: %v", file, err) - return - } - assert.Equal(t, beforeCount+1, tail.OpenFileCount.Load()) + require.NoError(t, err, fmt.Sprintf("Failed to create tailer src for file %v with error: %v", file, err)) + require.Equal(t, beforeCount+1, tail.OpenFileCount.Load()) ts := NewTailerSrc( "groupName", "streamName", "destination", @@ -100,31 +94,22 @@ func TestTailerSrc(t *testing.T) { msg := evt.Message() switch i { case 0, 1, 2: - if msg != lines[i] { - t.Errorf("Log Event %d does not match, lengths are %v != %v", i, len(msg), len(lines[i])) - } + 
require.Equal(t, msg, lines[i], fmt.Sprintf("Log Event %d does not match, lengths are %v != %v", i, len(msg), len(lines[i]))) case 3: expected := lines[i][:256*1024] - if msg != expected { - t.Errorf("Log Event %d should be truncated, does not match expectation, end of the logs are '%v' != '%v'", i, msg[len(msg)-50:], expected[len(expected)-50:]) - } + require.Equal(t, msg, expected, fmt.Sprintf("Log Event %d should be truncated, does not match expectation, end of the logs are '%v' != '%v'", i, msg[len(msg)-50:], expected[len(expected)-50:])) case 4, 5: // Know bug: truncated single line log event would be broken into 2n events case 6: expected := lines[4][:256*1024-len(defaultTruncateSuffix)] + defaultTruncateSuffix - if msg != expected { - t.Errorf("Log Event %d should be truncated, does not match expectation, end of the logs are '%v ... %v'(%v) != '%v ... %v'(%v)", i, msg[:50], msg[len(msg)-50:], len(msg), expected[:50], expected[len(expected)-50:], len(expected)) - } + require.Equal(t, msg, expected, fmt.Sprintf("Log Event %d should be truncated, does not match expectation, end of the logs are '%v ... %v'(%v) != '%v ... %v'(%v)", i, msg[:50], msg[len(msg)-50:], len(msg), expected[:50], expected[len(expected)-50:], len(expected))) case 7: expected := lines[5][:256*1024-len(defaultTruncateSuffix)] + defaultTruncateSuffix - if msg != expected { - t.Errorf("Log Event %d should be truncated, does not match expectation, end of the logs are '%v ... %v'(%v) != '%v ... %v'(%v)", i, msg[:50], msg[len(msg)-50:], len(msg), expected[:50], expected[len(expected)-50:], len(expected)) - } + require.Equal(t, msg, expected, fmt.Sprintf("Log Event %d should be truncated, does not match expectation, end of the logs are '%v ... %v'(%v) != '%v ... 
%v'(%v)", i, msg[:50], msg[len(msg)-50:], len(msg), expected[:50], expected[len(expected)-50:], len(expected))) + case 8: expected := lines[7] - if msg != expected { - t.Errorf("Log Event %d does not match expectation, end of the logs are '%v ... %v'(%v) != '%v ... %v'(%v)", i, msg[:50], msg[len(msg)-50:], len(msg), expected[:50], expected[len(expected)-50:], len(expected)) - } + require.Equal(t, msg, expected, fmt.Sprintf("Log Event %d does not match expectation, end of the logs are '%v ... %v'(%v) != '%v ... %v'(%v)", i, msg[:50], msg[len(msg)-50:], len(msg), expected[:50], expected[len(expected)-50:], len(expected))) default: t.Errorf("unexpected log event: %v", evt) } @@ -134,24 +119,26 @@ func TestTailerSrc(t *testing.T) { // Slow send for _, l := range lines { fmt.Fprintln(file, l) - time.Sleep(1 * time.Second) + time.Sleep(2 * time.Second) } // Fast send i = 0 for _, l := range lines { fmt.Fprintln(file, l) + time.Sleep(500 * time.Millisecond) } // Removal of log file should stop tailerSrc and Tail. - if err := os.Remove(file.Name()); err != nil { - t.Errorf("failed to remove log file '%v': %v", file.Name(), err) - } + err = os.Remove(file.Name()) + require.NoError(t, err, fmt.Sprintf("Failed to remove log file '%v': %v", file.Name(), err)) + <-done + // Most test functions do not wait for the Tail to close the file. // They rely on Tail to detect file deletion and close the file. // So the count might be nonzero due to previous test cases. 
- assert.LessOrEqual(t, tail.OpenFileCount.Load(), beforeCount) + assert.Eventually(t, func() bool { return tail.OpenFileCount.Load() <= beforeCount }, 3*time.Second, time.Second) } func TestOffsetDoneCallBack(t *testing.T) { @@ -160,15 +147,11 @@ func TestOffsetDoneCallBack(t *testing.T) { file, err := createTempFile("", "tailsrctest-*.log") defer os.Remove(file.Name()) - if err != nil { - t.Errorf("Failed to create temp file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create temp file: %v", err)) statefile, err := os.CreateTemp("", "tailsrctest-state-*.log") defer os.Remove(statefile.Name()) - if err != nil { - t.Errorf("Failed to create temp file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create temp file: %v", err)) tailer, err := tail.TailFile(file.Name(), tail.Config{ @@ -182,10 +165,7 @@ func TestOffsetDoneCallBack(t *testing.T) { IsUTF16: false, }) - if err != nil { - t.Errorf("Failed to create tailer src for file %v with error: %v", file, err) - return - } + require.NoError(t, err, fmt.Sprintf("Failed to create tailer src for file %v with error: %v", file, err)) ts := NewTailerSrc( "groupName", "streamName", @@ -204,6 +184,7 @@ func TestOffsetDoneCallBack(t *testing.T) { multilineWaitPeriod = 100 * time.Millisecond done := make(chan struct{}) + i := 0 ts.SetOutput(func(evt logs.LogEvent) { if evt == nil { @@ -212,62 +193,36 @@ func TestOffsetDoneCallBack(t *testing.T) { } evt.Done() i++ - log.Println(i) - if i == 10 { // Test before first truncate + switch i { + case 10: + // Test before first truncate time.Sleep(1 * time.Second) b, err := os.ReadFile(statefile.Name()) - if err != nil { - t.Errorf("Failed to read state file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to read state file: %v", err)) offset, err := strconv.Atoi(string(bytes.Split(b, []byte("\n"))[0])) - if err != nil { - t.Errorf("Failed to parse offset: %v, from '%s'", err, b) - } - - if offset != 1010 { - t.Errorf("Wrong offset %v is 
written to state file, expecting 1010", offset) - } - } - - if i == 15 { // Test after first truncate, saved offset should decrease + require.NoError(t, err, fmt.Sprintf("Failed to parse offset: %v, from '%s'", err, b)) + require.Equal(t, offset, 1010, fmt.Sprintf("Wrong offset %v is written to state file, expecting 1010", offset)) + case 15: + // Test after first truncate, saved offset should decrease time.Sleep(1 * time.Second) log.Println(statefile.Name()) b, err := os.ReadFile(statefile.Name()) - log.Println(b) - if err != nil { - t.Errorf("Failed to read state file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to read state file: %v", err)) file_parts := bytes.Split(b, []byte("\n")) log.Println("file_parts: ", file_parts) file_string := string(file_parts[0]) log.Println("file_string: ", file_string) offset, err := strconv.Atoi(file_string) - log.Println(offset) - log.Println(err) - if err != nil { - t.Errorf("Failed to parse offset: %v, from '%s'", err, b) - } - - if offset != 505 { - t.Errorf("Wrong offset %v is written to state file, after truncate and write shorter logs expecting 505", offset) - } - } - - if i == 35 { // Test after 2nd truncate, the offset should be larger + require.NoError(t, err, fmt.Sprintf("Failed to parse offset: %v, from '%s'", err, b)) + require.Equal(t, offset, 505, fmt.Sprintf("Wrong offset %v is written to state file, after truncate and write shorter logs expecting 505", offset)) + case 35: time.Sleep(1 * time.Second) b, err := os.ReadFile(statefile.Name()) - if err != nil { - t.Errorf("Failed to read state file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to read state file: %v", err)) offset, err := strconv.Atoi(string(bytes.Split(b, []byte("\n"))[0])) - if err != nil { - t.Errorf("Failed to parse offset: %v, from '%s'", err, b) - } - if offset != 2020 { - t.Errorf("Wrong offset %v is written to state file, after truncate and write longer logs expecting 2020", offset) - } + require.NoError(t, err, 
fmt.Sprintf("Failed to parse offset: %v, from '%s'", err, b)) + require.Equal(t, offset, 2020, fmt.Sprintf("Wrong offset %v is written to state file, after truncate and write shorter logs expecting 2022", offset)) } - }) // Write 100 lines (save state should record 1010 bytes) @@ -280,9 +235,9 @@ func TestOffsetDoneCallBack(t *testing.T) { // First truncate then write 50 lines (save state should record 505 bytes) log.Println("Truncate file") - if err := file.Truncate(0); err != nil { - t.Errorf("Failed to truncate log file '%v': %v", file.Name(), err) - } + err = file.Truncate(0) + require.NoError(t, err, fmt.Sprintf("Failed to truncate log file '%v': %v", file.Name(), err)) + log.Println("Sleep before write") time.Sleep(1 * time.Second) file.Seek(io.SeekStart, 0) @@ -296,24 +251,22 @@ func TestOffsetDoneCallBack(t *testing.T) { log.Println("Truncate then write 20 lines") // Second truncate then write 20 lines (save state should record 2020 bytes) - if err := file.Truncate(0); err != nil { - t.Errorf("Failed to truncate log file '%v': %v", file.Name(), err) - } + err = file.Truncate(0) + require.NoError(t, err, fmt.Sprintf("failed to truncate log file '%v': %v", file.Name(), err)) + time.Sleep(1 * time.Second) file.Seek(io.SeekStart, 0) for i := 0; i < 20; i++ { fmt.Fprintln(file, logLine("C", 100, time.Now())) } - time.Sleep(2 * time.Second) + time.Sleep(3 * time.Second) // Removal of log file should stop tailersrc - if err := os.Remove(file.Name()); err != nil { - t.Errorf("failed to remove log file '%v': %v", file.Name(), err) - } + err = os.Remove(file.Name()) + require.NoError(t, err, fmt.Sprintf("failed to remove log file '%v': %v", file.Name(), err)) + <-done - if i < 35 { - t.Errorf("Not enough logs have been processed, only %v are processed", i) - } + require.GreaterOrEqual(t, i, 35, fmt.Sprintf("Not enough logs have been processed, only %v are processed", i)) } func TestTailerSrcFiltersSingleLineLogs(t *testing.T) { @@ -328,9 +281,9 @@ func 
TestTailerSrcFiltersSingleLineLogs(t *testing.T) { publishLogsToFile(resources.file, matchedLog, unmatchedLog, n, 0) // Removal of log file should stop tailersrc - if err := os.Remove(resources.file.Name()); err != nil { - t.Errorf("failed to remove log file '%v': %v", resources.file.Name(), err) - } + err := os.Remove(resources.file.Name()) + require.NoError(t, err, fmt.Sprintf("Failed to remove log file '%v': %v", resources.file.Name(), err)) + <-*resources.done assertExpectedLogsPublished(t, n, int(*resources.consumed)) } @@ -360,9 +313,9 @@ func TestTailerSrcFiltersMultiLineLogs(t *testing.T) { publishLogsToFile(resources.file, matchedLog, unmatchedLog, n, 100) // Removal of log file should stop tailersrc - if err := os.Remove(resources.file.Name()); err != nil { - t.Errorf("failed to remove log file '%v': %v", resources.file.Name(), err) - } + err := os.Remove(resources.file.Name()) + require.NoError(t, err, fmt.Sprintf("Failed to remove log file '%v': %v", resources.file.Name(), err)) + <-*resources.done assertExpectedLogsPublished(t, n, int(*resources.consumed)) } @@ -401,13 +354,9 @@ func setupTailer(t *testing.T, multiLineFn func(string) bool, maxEventSize int) done := make(chan struct{}) var consumed int32 file, err := createTempFile("", "tailsrctest-*.log") - if err != nil { - t.Errorf("Failed to create temp file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create temp file: %v", err)) statefile, err := createTempFile("", "tailsrctest-state-*.log") - if err != nil { - t.Errorf("Failed to create temp file: %v", err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create temp file: %v", err)) tailer, err := tail.TailFile(file.Name(), tail.Config{ @@ -421,9 +370,7 @@ func setupTailer(t *testing.T, multiLineFn func(string) bool, maxEventSize int) IsUTF16: false, }) - if err != nil { - t.Errorf("Failed to create tailer src for file %v with error: %v", file, err) - } + require.NoError(t, err, fmt.Sprintf("Failed to create tailer src for 
file %v with error: %v", file, err)) config := &FileConfig{ LogGroupName: t.Name(), @@ -493,18 +440,20 @@ func publishLogsToFile(file *os.File, matchedLog, unmatchedLog string, n, multiL } func assertExpectedLogsPublished(t *testing.T, total, numConsumed int) { - assert.Equal(t, total/2, numConsumed) + // Atomic recommends synchronization functions is better done with channels or the facilities of the sync package + // Therefore, the count will fluctuate and not equal with the expect consumed. + assert.LessOrEqual(t, numConsumed, total/2) stats := profiler.Profiler.GetStats() statKey := fmt.Sprintf("logfile_%s_%s_messages_dropped", t.Name(), t.Name()) if val, ok := stats[statKey]; !ok { t.Error("Missing profiled stat") } else { - assert.Equal(t, total/2, int(val)) + assert.LessOrEqual(t, int(val), total/2) } } -func resetState(originalWaitMs time.Duration) { - multilineWaitPeriod = originalWaitMs +func resetState(originWaitDuration time.Duration) { + multilineWaitPeriod = originWaitDuration profiler.Profiler.ReportAndClear() } diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 9073ea5f18..806f128c70 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -9,12 +9,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - - "github.com/aws/amazon-cloudwatch-agent/internal" - "github.com/aws/amazon-cloudwatch-agent/internal/publisher" - "github.com/aws/amazon-cloudwatch-agent/metric/distribution" - "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatch" @@ -26,6 +20,12 @@ import ( "github.com/influxdata/toml/ast" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-cloudwatch-agent/internal" + 
"github.com/aws/amazon-cloudwatch-agent/internal/publisher" + "github.com/aws/amazon-cloudwatch-agent/metric/distribution" + "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" ) // Test that each tag becomes one dimension @@ -339,7 +339,6 @@ func newCloudWatchClient(svc cloudwatchiface.CloudWatchAPI, forceFlushInterval t return cloudwatch } -// func makeMetrics(count int) []telegraf.Metric { metrics := make([]telegraf.Metric, 0, count) measurement := "Test_namespace" @@ -368,7 +367,7 @@ func TestWrite(t *testing.T) { cloudWatchOutput.WriteToCloudWatch) metrics := makeMetrics(1500) cloudWatchOutput.Write(metrics) - time.Sleep(time.Second + 2*cloudWatchOutput.ForceFlushInterval.Duration) + time.Sleep(2*time.Second + 2*cloudWatchOutput.ForceFlushInterval.Duration) assert.True(t, svc.AssertNumberOfCalls(t, "PutMetricData", 2)) cloudWatchOutput.Close() } diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go index 2a99dd0bfe..7389ab8f32 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go @@ -6,41 +6,67 @@ package cloudwatchlogs import ( "testing" - "github.com/influxdata/telegraf/plugins/outputs" + "github.com/stretchr/testify/require" ) -func TestCreateDest(t *testing.T) { - // Test filename as log group name - c := outputs.Outputs["cloudwatchlogs"]().(*CloudWatchLogs) - c.LogStreamName = "STREAM" +// TestCreateDestination would create different destination for cloudwatchlogs endpoint based on the log group, log stream, +// and log group's retention +func TestCreateDestination(t *testing.T) { - d0 := c.CreateDest("GROUP", "OTHER_STREAM", -1).(*cwDest) - if d0.pusher.Group != "GROUP" || d0.pusher.Stream != "OTHER_STREAM" { - t.Errorf("Wrong target for the created cwDest: %s/%s, expecting GROUP/OTHER_STREAM", d0.pusher.Group, d0.pusher.Stream) + testCases := map[string]struct { + cfgLogGroup string + cfgLogStream 
string + cfgLogRetention int + expectedLogGroup string + expectedLogStream string + expectedLogGroupRetention int + }{ + "WithTomlGroupStream": { + cfgLogGroup: "", + cfgLogStream: "", + cfgLogRetention: -1, + expectedLogGroup: "G1", + expectedLogStream: "S1", + expectedLogGroupRetention: -1, + }, + "WithOverrideGroupStream": { + cfgLogGroup: "Group5", + cfgLogStream: "Stream5", + cfgLogRetention: -1, + expectedLogGroup: "Group5", + expectedLogStream: "Stream5", + expectedLogGroupRetention: -1, + }, } - d1 := c.CreateDest("FILENAME", "", -1).(*cwDest) - if d1.pusher.Group != "FILENAME" || d1.pusher.Stream != "STREAM" { - t.Errorf("Wrong target for the created cwDest: %s/%s, expecting FILENAME/STREAM", d1.pusher.Group, d1.pusher.Stream) - } - - d2 := c.CreateDest("FILENAME", "", -1).(*cwDest) - - if d1 != d2 { - t.Errorf("Create dest with the same name should return the same cwDest") + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + c := &CloudWatchLogs{ + LogGroupName: "G1", + LogStreamName: "S1", + AccessKey: "access_key", + SecretKey: "secret_key", + pusherStopChan: make(chan struct{}), + cwDests: make(map[Target]*cwDest), + } + dest := c.CreateDest(testCase.cfgLogGroup, testCase.cfgLogStream, testCase.cfgLogRetention).(*cwDest) + require.Equal(t, testCase.expectedLogGroup, dest.pusher.Group) + require.Equal(t, testCase.expectedLogStream, dest.pusher.Stream) + }) } +} - d3 := c.CreateDest("ANOTHERFILE", "", -1).(*cwDest) - if d1 == d3 { - t.Errorf("Different file name should result in different cwDest") +func TestDuplicateDestination(t *testing.T) { + c := &CloudWatchLogs{ + AccessKey: "access_key", + SecretKey: "secret_key", + cwDests: make(map[Target]*cwDest), + pusherStopChan: make(chan struct{}), } + // Given the same log group, log stream and same retention + d1 := c.CreateDest("FILENAME", "", -1) + d2 := c.CreateDest("FILENAME", "", -1) - c.LogGroupName = "G1" - c.LogStreamName = "S1" - - d := c.CreateDest("", "", -1).(*cwDest) 
- - if d.pusher.Group != "G1" || d.pusher.Stream != "S1" { - t.Errorf("Empty create dest should return dest to default group and stream, %v/%v found", d.pusher.Group, d.pusher.Stream) - } + // Then the destination for cloudwatchlogs endpoint would be the same + require.Equal(t, d1, d2) } diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index dceb375c33..4fc93c8ca2 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -19,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf/models" + "github.com/stretchr/testify/require" ) var wg sync.WaitGroup @@ -58,13 +59,10 @@ func (s *svcMock) PutRetentionPolicy(in *cloudwatchlogs.PutRetentionPolicyInput) func TestNewPusher(t *testing.T) { var s svcMock stop, p := testPreparation(-1, &s, time.Second, maxRetryTimeout) - if p.Service != &s { - t.Errorf("Pusher service does not match the service passed in") - } - if p.Group != "G" || p.Stream != "S" { - t.Errorf("Pusher initialized with the wrong target: %v", p.Target) - } + require.Equal(t, &s, p.Service, "Pusher service does not match the service passed in") + require.Equal(t, p.Group, "G", fmt.Sprintf("Pusher initialized with the wrong target: %v", p.Target)) + require.Equal(t, p.Stream, "S", fmt.Sprintf("Pusher initialized with the wrong target: %v", p.Target)) close(stop) wg.Wait() @@ -110,21 +108,16 @@ func TestAddSingleEvent(t *testing.T) { } stop, p := testPreparation(-1, &s, 1*time.Hour, maxRetryTimeout) + p.AddEvent(evtMock{"MSG", time.Now(), nil}) + require.False(t, called, "PutLogEvents has been called too fast, it should wait until FlushTimeout.") - if called { - t.Errorf("PutLogEvents has been called too fast, it should wait until FlushTimeout.") - } p.FlushTimeout = 10 * time.Millisecond p.resetFlushTimer() - time.Sleep(2000 * time.Millisecond) - if !called { - 
t.Errorf("PutLogEvents has not been called after FlushTimeout has been reached.") - } - if *p.sequenceToken != nst { - t.Errorf("Pusher did not capture the NextSequenceToken") - } + time.Sleep(3 * time.Second) + require.True(t, called, "PutLogEvents has not been called after FlushTimeout has been reached.") + require.NotNil(t, nst, *p.sequenceToken, "Pusher did not capture the NextSequenceToken") close(stop) wg.Wait() @@ -144,19 +137,16 @@ func TestStopPusherWouldDoFinalSend(t *testing.T) { } stop, p := testPreparation(-1, &s, 1*time.Hour, maxRetryTimeout) + p.AddEvent(evtMock{"MSG", time.Now(), nil}) time.Sleep(10 * time.Millisecond) - if called { - t.Errorf("PutLogEvents has been called too fast, it should wait until FlushTimeout.") - } + require.False(t, called, "PutLogEvents has been called too fast, it should wait until FlushTimeout.") close(stop) wg.Wait() - if !called { - t.Errorf("PutLogEvents has not been called after p has been Stopped.") - } + require.True(t, called, "PutLogEvents has not been called after FlushTimeout has been reached.") } func TestStopPusherWouldStopRetries(t *testing.T) { @@ -316,13 +306,11 @@ func TestIgnoreOutOfTimeRangeEvent(t *testing.T) { p.AddEvent(evtMock{"MSG", time.Now().Add(2*time.Hour + 1*time.Minute), nil}) loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") - if len(loglines) != 2 { - t.Errorf("Expecting 2 error logs, but %d received", len(loglines)) - } + require.Equal(t, 2, len(loglines), fmt.Sprintf("Expecting 2 error logs, but %d received", len(loglines))) + for _, logline := range loglines { - if !strings.Contains(logline, "E!") || !strings.Contains(logline, "Discard the log entry") { - t.Errorf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String()) - } + require.True(t, strings.Contains(logline, "E!"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) + require.True(t, strings.Contains(logline, "Discard the log 
entry"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) } log.SetOutput(os.Stderr) @@ -377,11 +365,11 @@ func TestAddMultipleEvents(t *testing.T) { p.FlushTimeout = 10 * time.Millisecond p.resetFlushTimer() - time.Sleep(2000 * time.Millisecond) - if p.sequenceToken == nil || *p.sequenceToken != nst { - t.Errorf("Pusher did not capture the NextSequenceToken") - } + time.Sleep(3 * time.Second) + require.NotNil(t, p.sequenceToken, "Pusher did not capture the NextSequenceToken") + require.Equal(t, nst, *p.sequenceToken, "Pusher did not capture the NextSequenceToken") + close(stop) wg.Wait() } @@ -457,20 +445,17 @@ func TestUnhandledErrorWouldNotResend(t *testing.T) { stop, p := testPreparation(-1, &s, 1*time.Hour, maxRetryTimeout) p.AddEvent(evtMock{"msg", time.Now(), nil}) p.FlushTimeout = 10 * time.Millisecond - time.Sleep(2000 * time.Millisecond) + time.Sleep(2 * time.Second) logline := logbuf.String() - if !strings.Contains(logline, "E!") || !strings.Contains(logline, "unhandled error") { - t.Errorf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String()) - } + require.True(t, strings.Contains(logline, "E!"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) + require.True(t, strings.Contains(logline, "unhandled error"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) + log.SetOutput(os.Stderr) close(stop) wg.Wait() - - if cnt != 1 { - t.Errorf("Expecting pusher to call send 1 time, but %d times called", cnt) - } + require.Equal(t, 1, cnt, fmt.Sprintf("Expecting pusher to call send 1 time, but %d times called", cnt)) } func TestCreateLogGroupAndLogSteamWhenNotFound(t *testing.T) { @@ -526,12 +511,9 @@ func TestCreateLogGroupAndLogSteamWhenNotFound(t *testing.T) { foundUnknownErr = true } } - if !foundInvalidSeqToken { - t.Errorf("Expecting error log with Invalid 
SequenceToken, but received '%s' in the log", logbuf.String()) - } - if !foundUnknownErr { - t.Errorf("Expecting error log with unknown error, but received '%s' in the log", logbuf.String()) - } + + require.True(t, foundInvalidSeqToken, fmt.Sprintf("Expecting error log with Invalid SequenceToken, but received '%s' in the log", logbuf.String())) + require.True(t, foundUnknownErr, fmt.Sprintf("Expecting error log with unknown error, but received '%s' in the log", logbuf.String())) log.SetOutput(os.Stderr) @@ -566,13 +548,8 @@ func TestCreateLogGroupWithError(t *testing.T) { p.createLogGroupAndStream() - if cnt_clg != 1 { - t.Errorf("CreateLogGroup was not called.") - } - - if cnt_cls != 2 { - t.Errorf("CreateLogStream was not called.") - } + require.Equal(t, 1, cnt_clg, "CreateLogGroup was not called.") + require.Equal(t, 2, cnt_cls, "CreateLogStream was not called.") // test creating stream succeeds cnt_clg = 0 @@ -588,13 +565,8 @@ func TestCreateLogGroupWithError(t *testing.T) { p.createLogGroupAndStream() - if cnt_cls != 1 { - t.Errorf("CreateLogSteam was not called after CreateLogGroup returned ResourceAlreadyExistsException.") - } - - if cnt_clg != 0 { - t.Errorf("CreateLogGroup should not be called when logstream is created successfully at first time.") - } + require.Equal(t, 1, cnt_cls, "CreateLogSteam was not called after CreateLogGroup returned ResourceAlreadyExistsException.") + require.Equal(t, 0, cnt_clg, "CreateLogGroup should not be called when logstream is created successfully at first time.") // test creating group fails cnt_clg = 0 @@ -609,21 +581,13 @@ func TestCreateLogGroupWithError(t *testing.T) { } err := p.createLogGroupAndStream() - if err == nil { - t.Errorf("createLogGroupAndStream should return err.") - } + require.Error(t, err, "createLogGroupAndStream should return err.") - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != cloudwatchlogs.ErrCodeOperationAbortedException { - t.Errorf("createLogGroupAndStream should return 
ErrCodeOperationAbortedException.") - } + awsErr, ok := err.(awserr.Error) + require.False(t, ok && awsErr.Code() != cloudwatchlogs.ErrCodeOperationAbortedException, "createLogGroupAndStream should return ErrCodeOperationAbortedException.") - if cnt_cls != 1 { - t.Errorf("CreateLogSteam should be called for one time.") - } - - if cnt_clg != 1 { - t.Errorf("CreateLogGroup should be called for one time.") - } + require.Equal(t, 1, cnt_cls, "CreateLogSteam should be called for one time.") + require.Equal(t, 1, cnt_clg, "CreateLogGroup should be called for one time.") close(stop) wg.Wait() @@ -653,21 +617,19 @@ func TestLogRejectedLogEntryInfo(t *testing.T) { p.send() loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") - if len(loglines) != 4 { // 3 warnings and 1 debug - t.Errorf("Expecting 3 error logs, but %d received", len(loglines)) - } + require.Len(t, loglines, 4, fmt.Sprintf("Expecting 3 error logs, but %d received", len(loglines))) + logline := loglines[0] - if !strings.Contains(logline, "W!") || !strings.Contains(logline, "100") { - t.Errorf("Expecting error log events too old, but received '%s' in the log", logbuf.String()) - } + require.True(t, strings.Contains(logline, "W!"), fmt.Sprintf("Expecting error log events too old, but received '%s' in the log", logbuf.String())) + require.True(t, strings.Contains(logline, "100"), fmt.Sprintf("Expecting error log events too old, but received '%s' in the log", logbuf.String())) + logline = loglines[1] - if !strings.Contains(logline, "W!") || !strings.Contains(logline, "200") { - t.Errorf("Expecting error log events too new, but received '%s' in the log", logbuf.String()) - } + require.True(t, strings.Contains(logline, "W!"), fmt.Sprintf("Expecting error log events too new, but received '%s' in the log", logbuf.String())) + require.True(t, strings.Contains(logline, "200"), fmt.Sprintf("Expecting error log events too new, but received '%s' in the log", logbuf.String())) + logline = loglines[2] - if 
!strings.Contains(logline, "W!") || !strings.Contains(logline, "300") { - t.Errorf("Expecting error log events expired, but received '%s' in the log", logbuf.String()) - } + require.True(t, strings.Contains(logline, "W!"), fmt.Sprintf("Expecting error log events expired, but received '%s' in the log", logbuf.String())) + require.True(t, strings.Contains(logline, "300"), fmt.Sprintf("Expecting error log events expired, but received '%s' in the log", logbuf.String())) log.SetOutput(os.Stderr) @@ -681,7 +643,6 @@ func TestAddEventNonBlocking(t *testing.T) { const N = 100 s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - if len(in.LogEvents) != N { t.Errorf("PutLogEvents called with incorrect number of message, only %v received", len(in.LogEvents)) } @@ -702,7 +663,7 @@ func TestAddEventNonBlocking(t *testing.T) { evts = append(evts, e) } stop, p := testPreparation(-1, &s, 1*time.Hour, maxRetryTimeout) - p.FlushTimeout = 10 * time.Millisecond + p.FlushTimeout = 50 * time.Millisecond p.resetFlushTimer() time.Sleep(200 * time.Millisecond) // Wait until pusher started, merge channel is blocked @@ -710,11 +671,10 @@ func TestAddEventNonBlocking(t *testing.T) { p.AddEventNonBlocking(e) } - time.Sleep(2000 * time.Millisecond) + time.Sleep(3 * time.Second) + require.NotNil(t, p.sequenceToken, "Pusher did not capture the NextSequenceToken") + require.Equal(t, nst, *p.sequenceToken, "Pusher did not capture the NextSequenceToken") - if p.sequenceToken == nil || *p.sequenceToken != nst { - t.Errorf("Pusher did not capture the NextSequenceToken") - } close(stop) wg.Wait() } @@ -729,9 +689,7 @@ func TestPutRetentionNegativeInput(t *testing.T) { stop, p := testPreparation(-1, &s, 1*time.Hour, maxRetryTimeout) p.putRetentionPolicy() - if prpc == 1 { - t.Errorf("Put Retention Policy api shouldn't have been called") - } + require.NotEqual(t, 1, prpc, "Put Retention Policy api shouldn't have been called") close(stop) wg.Wait() @@ 
-747,9 +705,7 @@ func TestPutRetentionValidMaxInput(t *testing.T) { stop, p := testPreparation(1000000000000000000, &s, 1*time.Hour, maxRetryTimeout) p.putRetentionPolicy() - if prpc != 2 { - t.Errorf("Put Retention Policy api should have been called twice. Number of times called: %v", prpc) - } + require.Equal(t, 2, prpc, fmt.Sprintf("Put Retention Policy api should have been called twice. Number of times called: %v", prpc)) close(stop) wg.Wait() @@ -761,19 +717,19 @@ func TestPutRetentionWhenError(t *testing.T) { s.prp = func(in *cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error) { prpc++ return nil, awserr.New(cloudwatchlogs.ErrCodeResourceNotFoundException, "", nil) + } var logbuf bytes.Buffer log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) + stop, p := testPreparation(1, &s, 1*time.Hour, maxRetryTimeout) time.Sleep(10 * time.Millisecond) + loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") logline := loglines[0] - if prpc == 0 { - t.Errorf("Put Retention Policy should have been called on creation with retention of %v", p.Retention) - } - if !strings.Contains(logline, "ResourceNotFound") { - t.Errorf("Expecting ResourceNotFoundException but got '%s' in the log", logbuf.String()) - } + + require.NotEqual(t, 0, prpc, fmt.Sprintf("Put Retention Policy should have been called on creation with retention of %v", p.Retention)) + require.True(t, strings.Contains(logline, "ResourceNotFound"), fmt.Sprintf("Expecting ResourceNotFoundException but got '%s' in the log", logbuf.String())) close(stop) wg.Wait() @@ -792,14 +748,13 @@ func TestResendWouldStopAfterExhaustedRetries(t *testing.T) { stop, p := testPreparation(-1, &s, 10*time.Millisecond, time.Second) p.AddEvent(evtMock{"msg", time.Now(), nil}) - time.Sleep(2 * time.Second) + time.Sleep(4 * time.Second) loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") lastline := loglines[len(loglines)-1] expected := fmt.Sprintf("All %v retries to G/S 
failed for PutLogEvents, request dropped.", cnt-1) - if !strings.HasSuffix(lastline, expected) { - t.Errorf("Expecting error log to end with request dropped, but received '%s' in the log", logbuf.String()) - } + require.True(t, strings.HasSuffix(lastline, expected), fmt.Sprintf("Expecting error log to end with request dropped, but received '%s' in the log", logbuf.String())) + log.SetOutput(os.Stderr) close(stop) diff --git a/translator/totomlconfig/sampleConfig/advanced_config_darwin.conf b/translator/totomlconfig/sampleConfig/advanced_config_darwin.conf new file mode 100644 index 0000000000..9b278e30b5 --- /dev/null +++ b/translator/totomlconfig/sampleConfig/advanced_config_darwin.conf @@ -0,0 +1,72 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.cpu]] + fieldpass = ["usage_idle", "usage_iowait", "usage_user", "usage_system"] + percpu = false + totalcpu = false + [inputs.cpu.tags] + metricPath = "metrics" + + [[inputs.disk]] + fieldpass = ["used_percent", "inodes_free"] + tagexclude = ["mode"] + [inputs.disk.tags] + metricPath = "metrics" + + [[inputs.diskio]] + fieldpass = ["io_time", "write_bytes", "read_bytes", "writes", "reads"] + [inputs.diskio.tags] + metricPath = "metrics" + report_deltas = "true" + + [[inputs.mem]] + fieldpass = ["used_percent"] + [inputs.mem.tags] + metricPath = "metrics" + + [[inputs.netstat]] + fieldpass = ["tcp_established", "tcp_time_wait"] + [inputs.netstat.tags] + metricPath = "metrics" + + [[inputs.swap]] + fieldpass = ["used_percent"] + [inputs.swap.tags] + metricPath = "metrics" + +[outputs] + + [[outputs.cloudwatch]] + force_flush_interval = "60s" + namespace = "CWAgent" + 
region = "us-west-2" + tagexclude = ["host", "metricPath"] + [outputs.cloudwatch.tagpass] + metricPath = ["metrics"] + +[processors] + + [[processors.delta]] + + [[processors.ec2tagger]] + ec2_instance_tag_keys = ["aws:autoscaling:groupName"] + ec2_metadata_tags = ["ImageId", "InstanceId", "InstanceType"] + refresh_interval_seconds = "0s" + [processors.ec2tagger.tagpass] + metricPath = ["metrics"] diff --git a/translator/totomlconfig/sampleConfig/advanced_config_darwin.json b/translator/totomlconfig/sampleConfig/advanced_config_darwin.json new file mode 100644 index 0000000000..a4684076ff --- /dev/null +++ b/translator/totomlconfig/sampleConfig/advanced_config_darwin.json @@ -0,0 +1,59 @@ +{ + "metrics": { + "append_dimensions": { + "AutoScalingGroupName": "${aws:AutoScalingGroupName}", + "ImageId": "${aws:ImageId}", + "InstanceId": "${aws:InstanceId}", + "InstanceType": "${aws:InstanceType}" + }, + "metrics_collected": { + "cpu": { + "measurement": [ + "cpu_usage_idle", + "cpu_usage_iowait", + "cpu_usage_user", + "cpu_usage_system" + ], + "totalcpu": false + }, + "disk": { + "resources": [ + "*" + ], + "measurement": [ + "used_percent", + "inodes_free" + ] + }, + "diskio": { + "resources": [ + "*" + ], + "measurement": [ + "io_time", + "write_bytes", + "read_bytes", + "writes", + "reads" + ] + }, + "mem": { + "measurement": [ + "mem_used_percent" + ] + }, + "netstat": { + "measurement": [ + "tcp_established", + "tcp_time_wait" + ] + }, + "swap": { + "measurement": [ + "swap_used_percent" + ] + } + } + } + } + \ No newline at end of file diff --git a/translator/totomlconfig/toTomlConfig_test.go b/translator/totomlconfig/toTomlConfig_test.go index 30be860020..c1ae0663ac 100644 --- a/translator/totomlconfig/toTomlConfig_test.go +++ b/translator/totomlconfig/toTomlConfig_test.go @@ -7,26 +7,23 @@ import ( "bytes" "encoding/json" "log" + "os" "strings" "testing" "github.com/BurntSushi/toml" - 
"github.com/aws/amazon-cloudwatch-agent/translator/totomlconfig/tomlConfigTemplate" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/kr/pretty" - - "github.com/aws/amazon-cloudwatch-agent/translator" - - "github.com/aws/amazon-cloudwatch-agent/translator/util" - - "os" + "github.com/stretchr/testify/assert" commonconfig "github.com/aws/amazon-cloudwatch-agent/cfg/commonconfig" + "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/totomlconfig/tomlConfigTemplate" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" - "github.com/stretchr/testify/assert" + "github.com/aws/amazon-cloudwatch-agent/translator/util" ) func ReadFromFile(filename string) string { @@ -162,7 +159,7 @@ func TestStandardConfig(t *testing.T) { func TestAdvancedConfig(t *testing.T) { resetContext() checkTomlTranslation(t, "./sampleConfig/advanced_config_linux.json", "./sampleConfig/advanced_config_linux.conf", "linux") - checkTomlTranslation(t, "./sampleConfig/advanced_config_linux.json", "./sampleConfig/advanced_config_linux.conf", "darwin") + checkTomlTranslation(t, "./sampleConfig/advanced_config_darwin.json", "./sampleConfig/advanced_config_darwin.conf", "darwin") checkTomlTranslation(t, "./sampleConfig/advanced_config_windows.json", "./sampleConfig/advanced_config_windows.conf", "windows") } diff --git a/translator/translate/metrics/metrics_collect/ethtool/ethtool.go b/translator/translate/metrics/metrics_collect/ethtool/ethtool.go index eab4389a82..3de161e340 100644 --- a/translator/translate/metrics/metrics_collect/ethtool/ethtool.go +++ b/translator/translate/metrics/metrics_collect/ethtool/ethtool.go @@ -56,5 +56,4 @@ func (n *Ethtool) ApplyRule(input interface{}) (returnKey string, returnVal inte func init() { n := new(Ethtool) 
parent.RegisterLinuxRule(SectionKey_Ethtool, n) - parent.RegisterDarwinRule(SectionKey_Ethtool, n) } diff --git a/translator/translate/metrics/metrics_collect/gpu/nvidiaSmi.go b/translator/translate/metrics/metrics_collect/gpu/nvidiaSmi.go index 9d1844d8ef..9f31825460 100644 --- a/translator/translate/metrics/metrics_collect/gpu/nvidiaSmi.go +++ b/translator/translate/metrics/metrics_collect/gpu/nvidiaSmi.go @@ -68,6 +68,5 @@ func (n *NvidiaSmi) ApplyRule(input interface{}) (returnKey string, returnVal in func init() { n := new(NvidiaSmi) parent.RegisterLinuxRule(SectionKey_Nvidia_GPU, n) - parent.RegisterDarwinRule(SectionKey_Nvidia_GPU, n) //parent.RegisterWindowsRule(SectionKey_Nvidia_GPU, n) }