diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index 13e3ef2f2563..0b0efac5ed68 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -22,6 +22,10 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "Auditbeat Mandatory Testing" key: "auditbeat-mandatory-tests" @@ -34,7 +38,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -53,7 +57,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_RHEL9}" @@ -71,7 +75,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -91,7 +95,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -112,7 +116,7 @@ steps: GOX_FLAGS: "-arch amd64" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -128,14 +132,14 @@ steps: steps: - label: ":linux: Auditbeat Ubuntu Integration Tests" key: "auditbeat-extended-integ-tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ command: | set -euo pipefail cd auditbeat mage build integTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -149,14 +153,14 @@ steps: - label: ":linux: Auditbeat Ubuntu arm64 Integration Tests" key: "auditbeat-extended-arm64-integ-tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ command: | set -euo pipefail cd auditbeat mage build integTest retry: 
automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" @@ -177,7 +181,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" @@ -190,7 +194,7 @@ steps: context: "auditbeat: Linux arm64 Unit Tests" - label: ":mac: Auditbeat macOS x86_64 Unit Tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -198,7 +202,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -210,7 +214,7 @@ steps: context: "auditbeat: macOS x86_64 Unit Tests" - label: ":mac: Auditbeat macOS arm64 ARM Unit Tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -218,7 +222,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -241,7 +245,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -262,7 +266,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -283,7 +287,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -316,6 +320,10 @@ steps: set -euo pipefail cd auditbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: gcp image: "${IMAGE_UBUNTU_X86_64}" @@ -334,6 +342,10 @@ steps: set -euo pipefail cd auditbeat mage package + retry: + automatic: + - limit: 3 + 
timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml b/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml index d8a5354b1ef7..c0005309457e 100644 --- a/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml +++ b/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml @@ -3,6 +3,10 @@ env: IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" + MODULE: "kubernetes" + + # Other deps + ASDF_KIND_VERSION: "0.20.0" steps: - group: "Deploy/K8S" @@ -10,71 +14,102 @@ steps: steps: - label: "Checks" - command: ".buildkite/deploy/kubernetes/scripts/make.sh" + command: | + set -euo pipefail + make -C deploy/kubernetes all + make check-no-changes agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: - context: "Deploy/k8s-checks" + context: "deploy/k8s checks" - label: "K8S Test/K8S version: v1.29.0" key: "k8s-test-129" env: K8S_VERSION: "v1.29.0" - commands: - - "MODULE=kubernetes make -C metricbeat integration-tests" - - "make -C deploy/kubernetes test" + MODULE: "${MODULE}" + commands: | + set -euo pipefail + source .buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh + echo "--- Executing Tests" + make -C metricbeat integration-tests + make -C deploy/kubernetes test + retry: + automatic: + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: - context: "Deploy/k8s-test v1.29.0" + context: "deploy/k8s test v1.29.0" - label: "K8S Test/K8S version: v1.28.0" key: "k8s-test-128" env: K8S_VERSION: "v1.28.0" - commands: - - "MODULE=kubernetes make -C metricbeat integration-tests" - - "make -C deploy/kubernetes test" + MODULE: "${MODULE}" + commands: | + set -euo pipefail + source .buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh + echo "--- Executing Tests" + 
make -C metricbeat integration-tests + make -C deploy/kubernetes test + retry: + automatic: + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: - context: "Deploy/k8s-test v1.28.0" + context: "deploy/k8s test v1.28.0" - label: "K8S Test/K8S version: v1.27.3" key: "k8s-test-1273" env: K8S_VERSION: "v1.27.3" - commands: - - "MODULE=kubernetes make -C metricbeat integration-tests" - - "make -C deploy/kubernetes test" + MODULE: "${MODULE}" + commands: | + set -euo pipefail + source .buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh + echo "--- Executing Tests" + make -C metricbeat integration-tests + make -C deploy/kubernetes test + retry: + automatic: + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: - context: "Deploy/k8s-test v1.27.3" + context: "deploy/k8s test v1.27.3" - label: "K8S Test/K8S version: v1.26.6" key: "k8s-test-1266" env: K8S_VERSION: "v1.26.6" - commands: - - "MODULE=kubernetes make -C metricbeat integration-tests" - - "make -C deploy/kubernetes test" + MODULE: "${MODULE}" + commands: | + set -euo pipefail + source .buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh + echo "--- Executing Tests" + make -C metricbeat integration-tests + make -C deploy/kubernetes test + retry: + automatic: + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: - context: "Deploy/k8s-test v1.26.6" + context: "deploy/k8s test v1.26.6" diff --git a/.buildkite/deploy/kubernetes/scripts/install-kind.sh b/.buildkite/deploy/kubernetes/scripts/install-kind.sh deleted file mode 100755 index 8c399d2de376..000000000000 --- a/.buildkite/deploy/kubernetes/scripts/install-kind.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -MSG="environment variable missing." 
-KIND_VERSION=${KIND_VERSION:?$MSG} -KIND_BINARY="${BIN}/kind" - -if command -v kind -then - set +e - echo "Found Kind. Checking version.." - FOUND_KIND_VERSION=$(kind --version 2>&1 >/dev/null | awk '{print $3}') - if [ "$FOUND_KIND_VERSION" == "$KIND_VERSION" ] - then - echo "--- Versions match. No need to install Kind. Exiting." - exit 0 - fi - set -e -fi - -echo "UNMET DEP: Installing Kind" - -OS=$(uname -s| tr '[:upper:]' '[:lower:]') -ARCH=$(uname -m| tr '[:upper:]' '[:lower:]') -if [ "${ARCH}" == "aarch64" ] ; then - ARCH_SUFFIX=arm64 -else - ARCH_SUFFIX=amd64 -fi - -if curl -sSLo "${KIND_BINARY}" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${OS}-${ARCH_SUFFIX}" ; then - chmod +x "${KIND_BINARY}" - echo "Kind installed: ${KIND_VERSION}" -else - echo "Something bad with the download, let's delete the corrupted binary" - if [ -e "${KIND_BINARY}" ] ; then - rm "${KIND_BINARY}" - fi - exit 1 -fi diff --git a/.buildkite/deploy/kubernetes/scripts/install-kubectl.sh b/.buildkite/deploy/kubernetes/scripts/install-kubectl.sh deleted file mode 100755 index 7f6c75bf3b92..000000000000 --- a/.buildkite/deploy/kubernetes/scripts/install-kubectl.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -MSG="parameter missing." -K8S_VERSION=${K8S_VERSION:?$MSG} -KUBECTL_BINARY="${BIN}/kubectl" - -if command -v kubectl -then - set +e - echo "Found kubectl. Checking version.." - FOUND_KUBECTL_VERSION=$(kubectl version --client --short 2>&1 >/dev/null | awk '{print $3}') - if [ "${FOUND_KUBECTL_VERSION}" == "${K8S_VERSION}" ] - then - echo "Kubectl Versions match. No need to install kubectl. Exiting." 
- exit 0 - fi - set -e -fi - -echo "UNMET DEP: Installing kubectl" - -OS=$(uname -s| tr '[:upper:]' '[:lower:]') -ARCH=$(uname -m| tr '[:upper:]' '[:lower:]') -if [ "${ARCH}" == "aarch64" ] ; then - ARCH_SUFFIX=arm64 -else - ARCH_SUFFIX=amd64 -fi - -if curl -sSLo "${KUBECTL_BINARY}" "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/${OS}/${ARCH_SUFFIX}/kubectl" ; then - chmod +x "${KUBECTL_BINARY}" - echo "Current K8S Version: ${K8S_VERSION}" - echo "Kubectl installed: ${KUBECTL_BINARY}" -else - echo "--- Something bad with the download, let's delete the corrupted binary" - if [ -e "${KUBECTL_BINARY}" ] ; then - rm "${KUBECTL_BINARY}" - fi - exit 1 -fi - diff --git a/.buildkite/deploy/kubernetes/scripts/make.sh b/.buildkite/deploy/kubernetes/scripts/make.sh deleted file mode 100755 index 4c9120a2d901..000000000000 --- a/.buildkite/deploy/kubernetes/scripts/make.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -echo "--- Checking K8S" -make -C deploy/kubernetes all -make check-no-changes diff --git a/.buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh b/.buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh index 3656318bd648..3a4418a18f93 100755 --- a/.buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh +++ b/.buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh @@ -4,11 +4,15 @@ set -euo pipefail source .buildkite/env-scripts/util.sh +export KUBECONFIG="${WORKSPACE}/kubecfg" +export BIN="${WORKSPACE}/bin" +add_bin_path + echo "--- Installing kind & kubectl" -retry_with_count 5 .buildkite/deploy/kubernetes/scripts/install-kind.sh -retry_with_count 5 .buildkite/deploy/kubernetes/scripts/install-kubectl.sh +asdf plugin add kind +asdf install kind $ASDF_KIND_VERSION -echo "--- Setting up kind" +echo "~~~ Setting up kind" max_retries=3 timeout=5 retries=0 diff --git a/.buildkite/env-scripts/env.sh b/.buildkite/env-scripts/env.sh index b30b26c3e8a4..58624e300e69 100644 --- a/.buildkite/env-scripts/env.sh +++ 
b/.buildkite/env-scripts/env.sh @@ -12,7 +12,6 @@ WORKSPACE="$(pwd)" BIN="${WORKSPACE}/bin" HW_TYPE="$(uname -m)" PLATFORM_TYPE="$(uname)" -TMP_FOLDER="tmp.${REPO}" SNAPSHOT="true" PYTEST_ADDOPTS="" OSS_MODULE_PATTERN="^[a-z0-9]+beat\\/module\\/([^\\/]+)\\/.*" @@ -38,7 +37,6 @@ export WORKSPACE export BIN export HW_TYPE export PLATFORM_TYPE -export TMP_FOLDER export SNAPSHOT export PYTEST_ADDOPTS export OSS_MODULE_PATTERN diff --git a/.buildkite/env-scripts/util.sh b/.buildkite/env-scripts/util.sh old mode 100644 new mode 100755 diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 053e8dbec419..c7ac3072a842 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -22,17 +22,21 @@ env: K8S_VERSION: "v1.29.0" ASDF_KIND_VERSION: "0.20.0" + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "Filebeat Mandatory Tests" key: "filebeat-mandatory-tests" steps: - - label: ":ubuntu: Ubuntu Unit Tests" + - label: ":ubuntu: Filebeat Unit Tests" command: | cd filebeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -42,15 +46,15 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Ubuntu Unit Tests" + context: "filebeat: Linux x86_64 Unit Tests" - - label: ":ubuntu: Ubuntu Go Integration Tests" + - label: ":ubuntu: Filebeat Go Integration Tests" command: | cd filebeat mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -60,15 +64,15 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Ubuntu Go Integration Tests" + context: "filebeat: Filebeat Go Integration Tests" - - label: ":ubuntu: Ubuntu Python Integration Tests" + - label: ":ubuntu: Filebeat Python Integration Tests" command: | cd filebeat mage pythonIntegTest retry: automatic: - - 
limit: 3 + - limit: 3 agents: provider: gcp image: "${IMAGE_UBUNTU_X86_64}" @@ -80,14 +84,14 @@ steps: - github_commit_status: context: "filebeat: Python Integration Tests" - - label: ":windows: Windows 2016 Unit Tests" + - label: ":windows: Filebeat Windows 2016 Unit Tests" key: "windows-2016-unit-tests" command: | Set-Location -Path filebeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -101,14 +105,14 @@ steps: - github_commit_status: context: "filebeat: Windows 2016 Unit Tests" - - label: ":windows: Windows 2022 Unit Tests" + - label: ":windows: Filebeat Windows 2022 Unit Tests" key: "windows-2022-unit-tests" command: | Set-Location -Path filebeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -124,10 +128,12 @@ steps: - group: "Filebeat Extended Tests" key: "filebeat-extended-tests" + if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS|arm).*/ + steps: - - label: ":mac: MacOS x64_64 Unit Tests" + - label: ":mac: Filebeat macOS x86_64 Unit Tests" key: "macos-unit-tests-extended" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -135,7 +141,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -144,11 +150,11 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Extended MacOS Unit Tests" + context: "filebeat: macOS x86_64 Unit Tests" - - label: ":mac: MacOS arm64 Unit Tests" + - label: ":mac: Filebeat macOS arm64 Unit Tests" key: "macos-arm64-unit-tests-extended" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: 
build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -156,7 +162,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -165,9 +171,9 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Extended MacOS ARM Unit Tests" + context: "filebeat: macOS arm64 Unit Tests" - - label: ":linux: Ubuntu ARM Unit Tests" + - label: ":linux: Filebeat arm64 Unit Tests" key: "extended-arm64-unit-test" if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ command: | @@ -175,7 +181,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" @@ -185,20 +191,21 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Extended Ubuntu ARM Unit Tests" + context: "filebeat: Linux arm64 Unit Tests" - - group: "Extended Windows Tests" + - group: "Filebeat Extended Windows Tests" key: "filebeat-extended-win-tests" if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*[Ww]indows.*/ + steps: - - label: ":windows: Windows 2019 Unit Tests" + - label: ":windows: Filebeat Windows 2019 Unit Tests" key: "windows-extended-2019" command: | Set-Location -Path filebeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -210,16 +217,16 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Extended Windows 2019 Unit Tests" + context: "filebeat: Windows 2019 Unit Tests" - - label: ":windows: Windows 11 Unit Tests" + - label: ":windows: Filebeat Windows 11 Unit Tests" key: "windows-extended-11" command: | Set-Location -Path filebeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: 
"${IMAGE_WIN_11}" @@ -231,16 +238,16 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Extended Windows 11 Unit Tests" + context: "filebeat: Windows 11 Unit Tests" - - label: ":windows: Windows 10 Unit Tests" + - label: ":windows: Filebeat Windows 10 Unit Tests" key: "windows-extended-10" command: | Set-Location -Path filebeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -252,7 +259,7 @@ steps: - "filebeat/build/*.json" notify: - github_commit_status: - context: "filebeat: Extended Windows 10 Unit Tests" + context: "filebeat: Windows 10 Unit Tests" - wait: ~ # with PRs, we want to run packaging only if mandatory tests succeed @@ -264,11 +271,15 @@ steps: - group: "Filebeat Packaging" key: "packaging" steps: - - label: ":linux: Packaging Linux" + - label: ":linux: Filebeat Packaging Linux" key: "packaging-linux" command: | cd filebeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -279,13 +290,17 @@ steps: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" notify: - github_commit_status: - context: "filebeat: Packaging" + context: "filebeat: Packaging Linux" - - label: ":linux: Packaging ARM" + - label: ":linux: Filebeat Packaging arm64" key: "packaging-arm" command: | cd filebeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" @@ -295,4 +310,4 @@ steps: PACKAGES: "docker" notify: - github_commit_status: - context: "filebeat: Packaging ARM" + context: "filebeat: Packaging arm64" diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml index cadbcec1eca2..cdb3959c2533 100644 --- a/.buildkite/heartbeat/heartbeat-pipeline.yml +++ b/.buildkite/heartbeat/heartbeat-pipeline.yml @@ -21,6 +21,10 @@ env: # Other deps 
ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "Heartbeat Mandatory Testing" key: "heartbeat-mandatory-tests" @@ -32,7 +36,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -50,7 +54,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_RHEL9}" @@ -69,7 +73,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -89,7 +93,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -103,12 +107,12 @@ steps: context: "Heartbeat: Win-2022 Unit Tests" - label: ":ubuntu: Heartbeat Go Integration Tests" - command: | + command: | cd heartbeat mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -126,7 +130,7 @@ steps: mage pythonIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -150,7 +154,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" @@ -162,8 +166,7 @@ steps: - group: "Heartbeat Extended Testing MacOS" key: "heartbeat-extended-tests-macos" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ - + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ steps: - label: ":mac: Heartbeat MacOS Unit Tests" key: "macos-extended" @@ -174,7 +177,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -194,7 +197,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -212,12 +215,12 @@ steps: steps: - label: 
":windows: Heartbeat Win-2019 Unit Tests" key: "heartbeat-win-extended-2019" - command: | + command: | Set-Location -Path heartbeat mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -237,7 +240,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -257,7 +260,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -285,6 +288,10 @@ steps: command: | cd heartbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: gcp image: "${IMAGE_UBUNTU_X86_64}" @@ -302,6 +309,10 @@ steps: command: | cd heartbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 870ece78925e..9a7228c5b2e3 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -11,7 +11,7 @@ PRIVATE_CI_GCS_CREDENTIALS_PATH="kv/ci-shared/platform-ingest/gcp-platform-inges DOCKER_REGISTRY_SECRET_PATH="kv/ci-shared/platform-ingest/docker_registry_prod" GITHUB_TOKEN_VAULT_PATH="kv/ci-shared/platform-ingest/github_token" -if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats" || "$BUILDKITE_PIPELINE_SLUG" == "filebeat" ]]; then +if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats" ]]; then source .buildkite/env-scripts/env.sh if [[ -z "${GO_VERSION-""}" ]]; then export GO_VERSION=$(cat "${WORKSPACE}/.go-version") @@ -91,9 +91,3 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" ]]; then fi fi -if [[ "$BUILDKITE_PIPELINE_SLUG" == "deploy-k8s" ]]; then - source .buildkite/env-scripts/env.sh - if [[ "$BUILDKITE_STEP_KEY" == k8s-test* ]]; then - .buildkite/deploy/kubernetes/scripts/setup-k8s-env.sh - fi -fi diff --git a/.buildkite/hooks/scripts/util.sh b/.buildkite/hooks/scripts/util.sh 
index 8ef932725c6e..b441991e6feb 100755 --- a/.buildkite/hooks/scripts/util.sh +++ b/.buildkite/hooks/scripts/util.sh @@ -27,8 +27,8 @@ google_cloud_logout_active_account() { cleanup() { if [[ "$BUILDKITE_COMMAND" != *"buildkite-agent pipeline upload"* ]]; then echo "Deleting temporary files..." - if [[ -n "${BIN:-}" ]] && [[ -e "${BIN}/${TMP_FOLDER}" ]]; then - rm -rf "${BIN}/${TMP_FOLDER}.*" + if [[ -n "${BIN:-}" ]]; then + rm -rf "${BIN}" fi echo "Done." fi diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index bc77712c330b..fd485279858e 100644 --- a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -11,6 +11,10 @@ env: #Deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "Mandatory Tests" key: "mandatory-tests" @@ -23,7 +27,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -43,7 +47,7 @@ steps: mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -63,7 +67,7 @@ steps: mage pythonIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -83,7 +87,7 @@ steps: make crosscompile retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -103,7 +107,7 @@ steps: make STRESS_TEST_OPTIONS='-timeout=20m -race -v -parallel 1' GOTEST_OUTPUT_OPTIONS=' | go-junit-report > libbeat-stress-test.xml' stress-tests retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -125,7 +129,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index d15212d2ef32..f4a04dbb0c2b 100644 
--- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -25,6 +25,10 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "Metricbeat Mandatory Tests" key: "metricbeat-mandatory-tests" @@ -34,7 +38,7 @@ steps: command: "cd metricbeat && mage build unitTest" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -53,7 +57,7 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet metricbeat - echo "~~~ Will run tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" # TODO move this section to base image / pre-command hook echo "~~~ Installing kind" @@ -67,7 +71,7 @@ steps: cd metricbeat && mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -86,7 +90,7 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet metricbeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" # TODO move this section to base image / pre-command hook echo "~~~ Installing kind" @@ -100,7 +104,7 @@ steps: cd metricbeat && mage pythonIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -117,7 +121,7 @@ steps: command: "make -C metricbeat crosscompile" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -136,7 +140,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -157,7 +161,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -182,7 +186,7 @@ steps: key: 
"extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -203,7 +207,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -224,7 +228,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -240,7 +244,7 @@ steps: - group: "Metricbeat Extended MacOS Tests" key: "metricbeat-extended-macos-tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ steps: - label: ":mac: MacOS x64_64 Unit Tests" key: "extended-macos-x64-64-unit-tests" @@ -250,7 +254,7 @@ steps: cd metricbeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -270,7 +274,7 @@ steps: cd metricbeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -281,7 +285,6 @@ steps: - github_commit_status: context: "metricbeat: Extended MacOS arm64 Unit Tests" - - wait: ~ # with PRs, we want to run packaging only if mandatory tests succeed # for other cases, e.g. 
merge commits, we want to run packaging (and publish) independently of other tests @@ -295,6 +298,10 @@ steps: - label: ":linux: Packaging Linux" key: "packaging-linux" command: "cd metricbeat && mage package" + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -310,6 +317,10 @@ steps: - label: ":linux: Packaging ARM" key: "packaging-arm" command: "cd metricbeat && mage package" + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml index d510107a89c2..74873046c56d 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -20,6 +20,10 @@ env: #Deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "packetbeat Mandatory Tests" key: "packetbeat-mandatory-tests" @@ -30,7 +34,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -48,7 +52,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_RHEL9_X86_64}" @@ -66,7 +70,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -86,7 +90,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -110,7 +114,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -131,7 +135,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -152,7 +156,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: 
provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -171,7 +175,7 @@ steps: steps: - label: ":mac: MacOS x86_64 Unit Tests" key: "macos-x86-64-unit-tests-extended" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -179,7 +183,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -192,7 +196,7 @@ steps: - label: ":mac: MacOS arm64 Unit Tests" key: "macos-arm64-unit-tests-extended" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS|arm).*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*(macOS|arm).*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -200,7 +204,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -217,7 +221,7 @@ steps: if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" @@ -229,7 +233,6 @@ steps: - github_commit_status: context: "packetbeat: Extended Ubuntu ARM Unit Tests" - - wait: ~ # with PRs, we want to run packaging only if mandatory tests succeed # for other cases, e.g. 
merge commits, we want to run packaging (and publish) independently of other tests @@ -245,6 +248,10 @@ steps: command: | cd packetbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -262,6 +269,10 @@ steps: command: | cd packetbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 2c00f376cf26..66fca45bbff3 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -139,7 +139,7 @@ steps: commit: "${BUILDKITE_COMMIT}" branch: "${BUILDKITE_BRANCH}" - - label: "Trigger Xpack/Dockerlogbeat" + - label: "Trigger x-pack/dockerlogbeat" if: build.pull_request.id != null plugins: - monorepo-diff#v1.0.1: diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 1c541bb7896b..55affae41289 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -5,7 +5,7 @@ "pipelineSlug": "beats", "allow_org_users": true, "allowed_repo_permissions": ["admin", "write"], - "allowed_list": [ "mergify[bot]" ], + "allowed_list": ["dependabot[bot]", "mergify[bot]"], "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, @@ -21,7 +21,7 @@ "pipelineSlug": "beats-xpack-elastic-agent", "allow_org_users": true, "allowed_repo_permissions": ["admin", "write"], - "allowed_list": [ ], + "allowed_list": ["dependabot[bot]", "mergify[bot]"], "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, diff --git a/.buildkite/scripts/changesets.psm1 b/.buildkite/scripts/changesets.psm1 index 15fabd3eba5d..10e4d31a8b61 100644 --- a/.buildkite/scripts/changesets.psm1 +++ b/.buildkite/scripts/changesets.psm1 @@ -56,17 +56,9 @@ function DefineModuleFromTheChangeSet($projectPath) { } } - # TODO: remove this conditional when issue https://github.com/elastic/ingest-dev/issues/2993 
gets resolved - if(!$changedModules) { - if($Env:BUILDKITE_PIPELINE_SLUG -eq 'beats-xpack-metricbeat') { - $Env:MODULE = "aws" - } - else { - $Env:MODULE = "kubernetes" - } - } - else { - # TODO: once https://github.com/elastic/ingest-dev/issues/2993 gets resolved, this should be the only thing we export - $Env:MODULE = $changedModules + if ($changedModules) { + $env:MODULE = $changedModules + Write-Output "~~~ Set env var MODULE to [$env:MODULE]" + Write-Output "~~~ Resuming commands" } } diff --git a/.buildkite/scripts/changesets.sh b/.buildkite/scripts/changesets.sh index 3b7ed5d7ec00..5c6fd6c7b0d8 100644 --- a/.buildkite/scripts/changesets.sh +++ b/.buildkite/scripts/changesets.sh @@ -68,13 +68,10 @@ defineModuleFromTheChangeSet() { fi done - if [[ -z "$changed_modules" ]]; then # TODO: remove this conditional when issue https://github.com/elastic/ingest-dev/issues/2993 gets resolved - if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-metricbeat" ]]; then - export MODULE="aws" - else - export MODULE="kubernetes" - fi - else - export MODULE="${changed_modules}" # TODO: once https://github.com/elastic/ingest-dev/issues/2993 gets resolved, this should be the only thing we export + # export MODULE="" leads to an infinite loop https://github.com/elastic/ingest-dev/issues/2993 + if [[ ! 
-z $changed_modules ]]; then + export MODULE="${changed_modules}" + echo "~~~ Set env var MODULE to [$MODULE]" + echo "~~~ Resuming commands" fi } diff --git a/.buildkite/scripts/common.sh b/.buildkite/scripts/common.sh index b37ac7494fdc..ed855df9970b 100755 --- a/.buildkite/scripts/common.sh +++ b/.buildkite/scripts/common.sh @@ -24,11 +24,6 @@ XPACK_MODULE_PATTERN="^x-pack\\/[a-z0-9]+beat\\/module\\/([^\\/]+)\\/.*" # define if needed run cloud-specific tests for the particular beat [ -z "${run_xpack_metricbeat_aws_tests+x}" ] && run_xpack_metricbeat_aws_tests="$(buildkite-agent meta-data get run_xpack_metricbeat_aws_tests --default "false")" - -xpack_dockerlogbeat_changeset=( - "^x-pack/dockerlogbeat/.*" - ) - ci_changeset=( "^.buildkite/.*" ) @@ -478,7 +473,7 @@ if are_paths_changed "${packaging_changeset[@]}" ; then export PACKAGING_CHANGES="true" fi -if [[ "$BUILDKITE_STEP_KEY" == "xpack-metricbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "xpack-dockerlogbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "metricbeat-pipeline" ]]; then +if [[ "$BUILDKITE_STEP_KEY" == "xpack-metricbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "metricbeat-pipeline" ]]; then # Set the MODULE env variable if possible, it should be defined before generating pipeline's steps. It is used in multiple pipelines. 
defineModuleFromTheChangeSet "${BEATS_PROJECT_NAME}" fi diff --git a/.buildkite/scripts/crosscompile.sh b/.buildkite/scripts/crosscompile.sh deleted file mode 100755 index 12f0f6574ca9..000000000000 --- a/.buildkite/scripts/crosscompile.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -source .buildkite/scripts/install_tools.sh - -set -euo pipefail - -echo "--- Run Crosscompile for $BEATS_PROJECT_NAME" -make -C "${BEATS_PROJECT_NAME}" crosscompile diff --git a/.buildkite/scripts/install_tools.sh b/.buildkite/scripts/install_tools.sh deleted file mode 100755 index 3d25cf8e5c98..000000000000 --- a/.buildkite/scripts/install_tools.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash - -source .buildkite/scripts/common.sh - -set -euo pipefail - -echo "--- Env preparation" - -# Temporary solution to fix the issues with "sudo apt get...." https://elastic.slack.com/archives/C0522G6FBNE/p1706003603442859?thread_ts=1706003209.424539&cid=C0522G6FBNE -# It could be removed when we use our own image for the BK agent. -if [ "${platform_type}" == "Linux" ]; then - if [ "${platform_type}" == "Linux" ]; then - if [ $(checkLinuxType) = "ubuntu" ]; then - DEBIAN_FRONTEND="noninteractive" - #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) - sudo mkdir -p /etc/needrestart - echo "\$nrconf{restart} = 'a';" | sudo tee -a /etc/needrestart/needrestart.conf > /dev/null - fi - fi -fi - -add_bin_path - -if command -v docker-compose &> /dev/null -then - echo "Found docker-compose. Checking version.." - FOUND_DOCKER_COMPOSE_VERSION=$(docker-compose --version | awk '{print $4}'|sed s/\,//) - if [ $FOUND_DOCKER_COMPOSE_VERSION == $DOCKER_COMPOSE_VERSION ]; then - echo "Versions match. No need to install docker-compose. Exiting." 
- elif [[ "${platform_type}" == "Linux" && "${arch_type}" == "aarch64" ]]; then - with_docker_compose "${DOCKER_COMPOSE_VERSION_AARCH64}" - elif [[ "${platform_type}" == "Linux" && "${arch_type}" == "x86_64" ]]; then - with_docker_compose "${DOCKER_COMPOSE_VERSION}" - fi -else - with_docker_compose "${DOCKER_COMPOSE_VERSION}" -fi - -with_go "${GO_VERSION}" -with_mage -with_python -with_dependencies -config_git - -if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-heartbeat" ]]; then - # Install NodeJS - withNodeJSEnv "${NODEJS_VERSION}" - installNodeJsDependencies - - echo "Install @elastic/synthetics" - npm i -g @elastic/synthetics -fi - -mage dumpVariables - -#sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) -sudo chmod -R go-w "${BEATS_PROJECT_NAME}/" #TODO: Remove when the issue is solved https://github.com/elastic/beats/issues/37838 - -pushd "${BEATS_PROJECT_NAME}" > /dev/null - -#TODO "umask 0022" has to be removed after our own image is ready (it has to be moved to the image) -umask 0022 # fix the filesystem permissions issue like this: https://buildkite.com/elastic/beats-metricbeat/builds/1329#018d3179-25a9-475b-a2c8-64329dfe092b/320-1696 - -popd > /dev/null diff --git a/.buildkite/scripts/setenv.sh b/.buildkite/scripts/setenv.sh index f806b131a7a1..56f8d7257d60 100755 --- a/.buildkite/scripts/setenv.sh +++ b/.buildkite/scripts/setenv.sh @@ -2,20 +2,21 @@ set -euo pipefail +WORKSPACE=${WORKSPACE:-"$(pwd)"} +GO_VERSION=$(cat .go-version) + export REPO="beats" export DOCKER_REGISTRY="docker.elastic.co" export SETUP_GVM_VERSION="v0.5.1" export DOCKER_COMPOSE_VERSION="1.21.0" export DOCKER_COMPOSE_VERSION_AARCH64="v2.21.0" + export ASDF_NODEJS_VERSION="18.17.1" export AWS_REGION="eu-central-1" -WORKSPACE=${WORKSPACE:-"$(pwd)"} export WORKSPACE -GO_VERSION=$(cat .go-version) export GO_VERSION - exportVars() { local platform_type="$(uname)" local arch_type="$(uname -m)" @@ -43,13 +44,10 @@ 
exportVars() { if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-metricbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-winlogbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-auditbeat" ]]; then exportVars - export RACE_DETECTOR="true" - export TEST_COVERAGE="true" - export DOCKER_PULL="0" export TEST_TAGS="${TEST_TAGS:+$TEST_TAGS,}oracle" fi -if [[ "$BUILDKITE_STEP_KEY" == "xpack-winlogbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "xpack-metricbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "xpack-dockerlogbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "metricbeat-pipeline" ]]; then +if [[ "$BUILDKITE_STEP_KEY" == "xpack-winlogbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "xpack-metricbeat-pipeline" || "$BUILDKITE_STEP_KEY" == "metricbeat-pipeline" ]]; then source .buildkite/scripts/common.sh # Set the MODULE env variable if possible, it should be defined before generating pipeline's steps. It is used in multiple pipelines. defineModuleFromTheChangeSet "${BEATS_PROJECT_NAME}" diff --git a/.buildkite/scripts/stress_tests.sh b/.buildkite/scripts/stress_tests.sh deleted file mode 100755 index b177eb53ea6b..000000000000 --- a/.buildkite/scripts/stress_tests.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -source .buildkite/scripts/install_tools.sh - -set -euo pipefail - -echo "--- Run Stress Tests for $BEATS_PROJECT_NAME" - -pushd "${BEATS_PROJECT_NAME}" > /dev/null - -make STRESS_TEST_OPTIONS='-timeout=20m -race -v -parallel 1' GOTEST_OUTPUT_OPTIONS='| go-junit-report > libbeat-stress-test.xml' stress-tests - -popd > /dev/null diff --git a/.buildkite/scripts/unit_tests.sh b/.buildkite/scripts/unit_tests.sh deleted file mode 100755 index 059b4166e296..000000000000 --- a/.buildkite/scripts/unit_tests.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -source .buildkite/scripts/install_tools.sh - -set -euo pipefail - -echo "--- Run Unit Tests" -pushd "${BEATS_PROJECT_NAME}" > /dev/null - -mage build unitTest - 
-popd > /dev/null diff --git a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml index ff3327913492..c598224438fe 100644 --- a/.buildkite/winlogbeat/pipeline.winlogbeat.yml +++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml @@ -16,6 +16,12 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + # See docker.go. Sets --pull to docker-compose + DOCKER_PULL: 0 + steps: - group: "Winlogbeat Mandatory Tests" key: "winlogbeat-mandatory-tests" @@ -26,7 +32,7 @@ steps: command: "make -C winlogbeat crosscompile" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -45,7 +51,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -66,7 +72,7 @@ steps: key: "mandatory-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -87,7 +93,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -113,7 +119,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -134,7 +140,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -160,6 +166,10 @@ steps: command: | cd winlogbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml index 80c298c725df..14a79eb76d9d 100644 --- a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml @@ -22,6 +22,10 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit 
tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/auditbeat Mandatory Tests" key: "x-pack-auditbeat-mandatory-tests" @@ -33,12 +37,12 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/auditbeat - echo "~~~ Will run tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" cd x-pack/auditbeat mage update build test retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -57,7 +61,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_RHEL9_X86_64}" @@ -76,7 +80,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -97,7 +101,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -122,7 +126,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -143,7 +147,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -164,7 +168,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -180,7 +184,7 @@ steps: - group: "x-pack/auditbeat MacOS Extended Tests" key: "x-pack-auditbeat-extended-tests-macos" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ steps: - label: ":mac: MacOS x86_64 Unit Tests" command: | @@ -190,7 +194,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -209,7 +213,7 @@ steps: mage build 
unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -230,7 +234,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" @@ -258,6 +262,10 @@ steps: command: | cd x-pack/auditbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -275,6 +283,10 @@ steps: command: | cd x-pack/auditbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml index a64f7851913b..415c3947874a 100644 --- a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml @@ -2,35 +2,33 @@ name: "beats-xpack-dockerlogbeat" env: - ASDF_MAGE_VERSION: 1.15.0 - AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" - GCP_WIN_MACHINE_TYPE: "n2-standard-8" - IMAGE_MACOS_ARM: "generic-13-ventura-arm" - IMAGE_MACOS_X86_64: "generic-13-ventura-x64" - IMAGE_RHEL9_X86_64: "family/platform-ingest-beats-rhel-9" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" - IMAGE_WIN_10: "family/platform-ingest-beats-windows-10" - IMAGE_WIN_11: "family/platform-ingest-beats-windows-11" - IMAGE_WIN_2016: "family/platform-ingest-beats-windows-2016" - IMAGE_WIN_2019: "family/platform-ingest-beats-windows-2019" - IMAGE_WIN_2022: "family/platform-ingest-beats-windows-2022" + + # Other deps + ASDF_MAGE_VERSION: 1.15.0 + + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" steps: - - group: "Xpack/Dockerlogbeat Mandatory Tests" + - group: "x-pack/dockerlogbeat Mandatory Tests" key: 
"xpack-dockerlogbeat-mandatory-tests" steps: - - label: ":ubuntu: Xpack/Dockerlogbeat Ubuntu Unit Tests" + - label: ":ubuntu: x-pack/dockerlogbeat Ubuntu Unit Tests" key: "mandatory-linux-unit-test" - command: "cd x-pack/dockerlogbeat && mage build unitTest" + command: | + cd x-pack/dockerlogbeat + mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -42,14 +40,19 @@ steps: - github_commit_status: context: "x-pack/dockerlogbeat: Ubuntu Unit Tests" - - label: ":ubuntu: Xpack/Dockerlogbeat Go Integration Tests" + - label: ":ubuntu: x-pack/dockerlogbeat Go (Module) Integration Tests" key: "mandatory-int-test" - command: "cd x-pack/dockerlogbeat && mage goIntegTest" - env: - MODULE: $MODULE + command: | + set -euo pipefail + # defines the MODULE env var based on what's changed in a PR + source .buildkite/scripts/changesets.sh + defineModuleFromTheChangeSet x-pack/dockerlogbeat + echo "~~~ Running tests" + cd x-pack/dockerlogbeat + mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -59,7 +62,7 @@ steps: - "x-pack/dockerlogbeat/build/*.json" notify: - github_commit_status: - context: "x-pack/dockerlogbeat: Go Integration Tests" + context: "x-pack/dockerlogbeat: Go (Module) Integration Tests" - wait: ~ # with PRs, we want to run packaging only if mandatory tests succeed @@ -69,29 +72,47 @@ steps: depends_on: - "xpack-dockerlogbeat-mandatory-tests" - - group: "Xpack/Dockerlogbeat Packaging" + - group: "x-pack/dockerlogbeat Packaging" key: "xpack-dockerlogbeat-packaging" steps: - - label: ":ubuntu: Xpack/Dockerlogbeat Packaging Linux X86" + - label: ":ubuntu: x-pack/dockerlogbeat Packaging Linux" key: "auditbeat-package-linux-x86" env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" SNAPSHOT: true - command: "cd x-pack/dockerlogbeat && mage package" + command: | + cd x-pack/dockerlogbeat + mage package + 
retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: gcp image: "${IMAGE_UBUNTU_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" + notify: + - github_commit_status: + context: "x-pack/dockerlogbeat: Packaging Linux" - - label: ":linux: Xpack/Dockerlogbeat Packaging Linux ARM" + - label: ":linux: x-pack/dockerlogbeat Packaging Linux arm64" key: "auditbeat-package-linux-arm" env: PLATFORMS: "linux/arm64" PACKAGES: "docker" SNAPSHOT: true - command: "cd x-pack/dockerlogbeat && mage package" + command: | + cd x-pack/dockerlogbeat + mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" + notify: + - github_commit_status: + context: "x-pack/dockerlogbeat: Packaging Linux arm64" diff --git a/.buildkite/x-pack/pipeline.xpack.filebeat.yml b/.buildkite/x-pack/pipeline.xpack.filebeat.yml index b7e71e3c3c0a..1707bca29ecc 100644 --- a/.buildkite/x-pack/pipeline.xpack.filebeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.filebeat.yml @@ -21,6 +21,10 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/filebeat Mandatory Tests" key: "x-pack-filebeat-mandatory-tests" @@ -32,7 +36,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -51,11 +55,11 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/filebeat - echo "~~~ Will run tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" cd x-pack/filebeat && mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -74,11 +78,11 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet 
x-pack/filebeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" cd x-pack/filebeat && mage pythonIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -97,7 +101,7 @@ steps: key: "x-pack-filebeat-mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -118,7 +122,7 @@ steps: key: "x-pack-filebeat-mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -139,7 +143,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" @@ -162,7 +166,7 @@ steps: key: "x-pack-filebeat-extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -183,7 +187,7 @@ steps: key: "x-pack-filebeat-extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -204,7 +208,7 @@ steps: key: "x-pack-filebeat-extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -222,14 +226,14 @@ steps: key: "x-pack-filebeat-extended-tests" steps: - label: ":mac: MacOS x86_64 Unit Tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS).*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*(macOS).*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/filebeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -242,14 +246,14 @@ steps: - label: ":mac: MacOS arm64 Unit Tests" skip: "https://github.com/elastic/beats/issues/33036" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS).*/ + if: 
build.env("GITHUB_PR_LABELS") =~ /.*(macOS).*/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/filebeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -262,13 +266,13 @@ steps: - label: ":linux: Cloud (MODULE) Tests" key: "x-pack-filebeat-extended-cloud-test" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*aws.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*aws.*/ command: | set -euo pipefail # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/filebeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" .buildkite/scripts/cloud_tests.sh env: @@ -297,7 +301,7 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/filebeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" .buildkite/scripts/cloud_tests.sh env: ASDF_TERRAFORM_VERSION: 1.0.2 @@ -332,6 +336,10 @@ steps: command: | cd x-pack/filebeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -349,6 +357,10 @@ steps: command: | cd x-pack/filebeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 136706e698cc..3ccb94bdf5e6 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -2,7 +2,8 @@ name: "beats-xpack-heartbeat" env: - AWS_ARM_INSTANCE_TYPE: "t4g.xlarge" + AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" + AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" 
GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" @@ -11,7 +12,6 @@ env: IMAGE_MACOS_ARM: "generic-13-ventura-arm" IMAGE_MACOS_X86_64: "generic-13-ventura-x64" IMAGE_RHEL9_X86_64: "family/platform-ingest-beats-rhel-9" - IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" IMAGE_WIN_10: "family/platform-ingest-beats-windows-10" IMAGE_WIN_11: "family/platform-ingest-beats-windows-11" @@ -26,11 +26,15 @@ env: ASDF_MAGE_VERSION: 1.15.0 ASDF_NODEJS_VERSION: 18.17.1 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/heartbeat Mandatory Tests" key: "x-pack-heartbeat-mandatory-tests" steps: - - label: ":linux: Ubuntu Unit Tests" + - label: ":ubuntu: x-pack/heartbeat Ubuntu Unit Tests" key: "mandatory-linux-unit-test" command: | set -euo pipefail @@ -41,7 +45,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -53,7 +57,7 @@ steps: - github_commit_status: context: "x-pack/heartbeat: Ubuntu Unit Tests" - - label: ":go: Go Integration Tests" + - label: ":ubuntu: x-pack/heartbeat Go Integration Tests" key: "mandatory-int-test" command: | set -euo pipefail @@ -64,7 +68,7 @@ steps: mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -76,15 +80,15 @@ steps: - github_commit_status: context: "x-pack/heartbeat: Go Integration Tests" - - label: ":windows: Windows 2016 Unit Tests" + - label: ":windows: x-pack/heartbeat Windows 2016 Unit Tests" key: "mandatory-win-2016-unit-tests" skip: "skipping due to elastic/beats#23957 and elastic/beats#23958" command: | Set-Location -Path x-pack/heartbeat - mage build unitTest + mage build test retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -98,7 +102,8 @@ steps: - github_commit_status: context: 
"x-pack/heartbeat: Windows 2016 Unit Tests" - - label: ":windows: Windows 2022 Unit Tests" + # Doesn't exist in Jenkins + - label: ":windows: x-pack/heartbeat Windows 2022 Unit Tests" key: "mandatory-win-2022-unit-tests" skip: "skipping due to elastic/beats#23957 and elastic/beats#23958" command: | @@ -106,7 +111,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -124,15 +129,16 @@ steps: key: "x-pack-heartbeat-extended-win-tests" if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*[Ww]indows.*/ skip: "skipping due to elastic/beats#23957 and elastic/beats#23958" + steps: - - label: ":windows: Windows 10 Unit Tests" + - label: ":windows: x-pack/heartbeat Windows 10 Unit Tests" command: | Set-Location -Path x-pack/heartbeat mage build test key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -146,14 +152,15 @@ steps: - github_commit_status: context: "x-pack/heartbeat: Windows 10 Unit Tests" - - label: ":windows: Windows 11 Unit Tests" + # Doesn't exist in Jenkins + - label: ":windows: x-pack/heartbeat Windows 11 Unit Tests" command: | Set-Location -Path x-pack/heartbeat mage build test key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -167,13 +174,13 @@ steps: - github_commit_status: context: "x-pack/heartbeat: Windows 11 Unit Tests" - - label: ":windows: Windows 2019 Unit Tests" + - label: ":windows: x-pack/heartbeat Windows 2019 Unit Tests" command: | Set-Location -Path x-pack/heartbeat mage build test retry: automatic: - - limit: 3 + - limit: 3 key: "extended-win-2019-unit-tests" agents: provider: "gcp" @@ -188,11 +195,12 @@ steps: - github_commit_status: context: "x-pack/heartbeat: Windows 2019 Unit Tests" - - group: "x-pack/heartbeat MacOS Extended Tests" + - group: "x-pack/heartbeat macOS 
Extended Tests" key: "x-pack-heartbeat-extended-tests-macos" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + steps: - - label: ":mac: MacOS x86_64 Unit Tests" + - label: ":mac: x-pack/heartbeat macOS x86_64 Unit Tests" command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -202,7 +210,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -211,9 +219,9 @@ steps: - "x-pack/heartbeat/build/*.json" notify: - github_commit_status: - context: "x-pack/heartbeat: MacOS x86_64 Extended Tests" + context: "x-pack/heartbeat: macOS x86_64 Extended Tests" - - label: ":mac: MacOS arm64 Unit Tests" + - label: ":mac: x-pack/heartbeat macOS arm64 Unit Tests" command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh @@ -223,7 +231,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -232,22 +240,29 @@ steps: - "x-pack/heartbeat/build/*.json" notify: - github_commit_status: - context: "x-pack/heartbeat: MacOS arm64 Extended Tests" + context: "x-pack/heartbeat: macOS arm64 Extended Tests" - wait: ~ + # with PRs, we want to run packaging only if mandatory tests succeed + # for other cases, e.g. 
merge commits, we want to run packaging (and publish) independently of other tests + # this allows building DRA artifacts even if there is flakiness in mandatory tests if: build.env("BUILDKITE_PULL_REQUEST") != "false" depends_on: - step: "x-pack-heartbeat-mandatory-tests" - - group: "Packaging" - key: "packaging" + - group: "x-pack/heartbeat Packaging" + key: "x-pack-heartbeat-packaging" if: build.env("BUILDKITE_PULL_REQUEST") != "false" steps: - - label: ":linux: Packaging Linux" + - label: ":ubuntu: x-pack/heartbeat Packaging Linux x86_64" key: "packaging-linux" command: | cd x-pack/heartbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -258,20 +273,24 @@ steps: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" notify: - github_commit_status: - context: "x-pack/heartbeat: Packaging Linux" + context: "x-pack/heartbeat: Packaging Linux x86_64" - - label: ":linux: Packaging ARM" + - label: ":ubuntu: x-pack/heartbeat Packaging Linux arm64" key: "packaging-arm" command: | cd x-pack/heartbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" - imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" env: PLATFORMS: "linux/arm64" PACKAGES: "docker" notify: - github_commit_status: - context: "x-pack/heartbeat: Packaging Linux ARM" + context: "x-pack/heartbeat: Packaging Linux arm64" diff --git a/.buildkite/x-pack/pipeline.xpack.libbeat.yml b/.buildkite/x-pack/pipeline.xpack.libbeat.yml index 6bf456f6d83d..6c26e9614df2 100644 --- a/.buildkite/x-pack/pipeline.xpack.libbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.libbeat.yml @@ -17,6 +17,10 @@ env: #Deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/libbeat Mandatory Tests" key: "x-pack-libbeat-mandatory-tests" @@ -28,7 
+32,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -47,7 +51,7 @@ steps: mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -66,7 +70,7 @@ steps: mage pythonIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -85,7 +89,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -106,7 +110,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -131,7 +135,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -152,7 +156,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -173,7 +177,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -198,7 +202,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml index 4c1c31521f92..fb75291dde0d 100644 --- a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml @@ -21,6 +21,10 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/metricbeat Mandatory Tests" key: "x-pack-metricbeat-mandatory-tests" @@ -32,7 +36,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -51,11 
+55,11 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/metricbeat - echo "~~~ Will run tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" cd x-pack/metricbeat && mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -74,11 +78,11 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/metricbeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" cd x-pack/metricbeat && mage pythonIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -97,7 +101,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -118,7 +122,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -143,7 +147,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -164,7 +168,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -185,7 +189,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -204,14 +208,14 @@ steps: if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS|aws).*/ steps: - label: ":mac: MacOS x86_64 Unit Tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.**/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.**/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd 
x-pack/metricbeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -224,14 +228,14 @@ steps: - label: ":mac: MacOS arm64 Unit Tests" skip: "https://github.com/elastic/beats/issues/33036" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.**/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.**/ command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/metricbeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -245,7 +249,7 @@ steps: - label: ":linux: Cloud (MODULE) Tests" key: "x-pack-metricbeat-extended-cloud-test" skip: "doesn't belong in a stage in Jenkins, thus skipped" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*aws.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*aws.*/ # see link in Jenkins: https://github.com/elastic/beats/blob/ccd7b135df70358f8a02393d9bd8b716428b8048/x-pack/metricbeat/Jenkinsfile.yml#L39 # additionally skipping due to https://github.com/elastic/ingest-dev/issues/3170 command: | @@ -253,7 +257,7 @@ steps: # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/metricbeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" .buildkite/scripts/cloud_tests.sh env: @@ -278,13 +282,13 @@ steps: skip: "https://github.com/elastic/beats/issues/36425" # see commented out section in Jenkins: https://github.com/elastic/beats/blob/main/x-pack/metricbeat/Jenkinsfile.yml#L41-L52 # additionally skipping due to https://github.com/elastic/ingest-dev/issues/3170 - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*aws.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*aws.*/ command: | set -euo pipefail # defines the MODULE env var based 
on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet x-pack/metricbeat - echo "~~~ Running tests with env var MODULE=$$MODULE" + echo "~~~ Running tests" .buildkite/scripts/cloud_tests.sh env: ASDF_TERRAFORM_VERSION: 1.0.2 @@ -319,6 +323,10 @@ steps: command: | cd x-pack/metricbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -336,6 +344,10 @@ steps: command: | cd x-pack/metricbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml index c8ecac79735b..219bfe5910dd 100644 --- a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml @@ -10,7 +10,6 @@ env: IMAGE_MACOS_ARM: "generic-13-ventura-arm" IMAGE_MACOS_X86_64: "generic-13-ventura-x64" - IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" IMAGE_WIN_10: "family/platform-ingest-beats-windows-10" IMAGE_WIN_11: "family/platform-ingest-beats-windows-11" @@ -21,6 +20,10 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/osquerybeat Mandatory Tests" key: "x-pack-osquerybeat-mandatory-tests" @@ -32,7 +35,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -51,7 +54,7 @@ steps: mage goIntegTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -70,7 +73,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -91,7 +94,7 @@ steps: key: "mandatory-win-2022-unit-tests" 
retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -116,7 +119,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -137,7 +140,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -158,7 +161,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -183,7 +186,7 @@ steps: cd x-pack/osquerybeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -201,7 +204,7 @@ steps: cd x-pack/osquerybeat && mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -229,6 +232,10 @@ steps: .buildkite/scripts/install-msitools.sh cd x-pack/osquerybeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml index 1ab71c30d7df..117824689a91 100644 --- a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml @@ -20,6 +20,10 @@ env: #Deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/packetbeat Mandatory Tests" key: "x-pack-packetbeat-mandatory-tests" @@ -31,7 +35,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -50,7 +54,7 @@ steps: mage systemTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -69,7 +73,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: 
"gcp" image: "${IMAGE_RHEL9_X86_64}" @@ -88,7 +92,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -109,7 +113,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -131,7 +135,7 @@ steps: mage systemTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -156,7 +160,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -177,7 +181,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -198,7 +202,7 @@ steps: key: "extended-win-2019-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -220,7 +224,7 @@ steps: mage systemTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -246,7 +250,7 @@ steps: if: build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" @@ -260,7 +264,7 @@ steps: - group: "x-pack/packetbeat MacOS Extended Tests" key: "x-pack-packetbeat-extended-macos-tests" - if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + if: build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ steps: - label: ":mac: MacOS Unit Tests" key: "extended-macos-unit-tests" @@ -271,7 +275,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_X86_64}" @@ -291,7 +295,7 @@ steps: mage build unitTest retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "orka" imagePrefix: "${IMAGE_MACOS_ARM}" @@ -314,6 +318,10 @@ steps: command: | cd x-pack/packetbeat mage package + 
retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" @@ -331,6 +339,10 @@ steps: command: | cd x-pack/packetbeat mage package + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" diff --git a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml index c6b5a6f59fe5..b69aec324985 100644 --- a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml @@ -14,24 +14,25 @@ env: # Other deps ASDF_MAGE_VERSION: 1.15.0 + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + steps: - group: "x-pack/Winlogbeat Mandatory Tests" key: "x-pack-winlogbeat-mandatory-tests" steps: - - label: ":windows: x-pack/Winlogbeat Win-2019 Unit (MODULE) Tests" key: "mandatory-win-2019-module-unit-tests" command: | Import-Module ./.buildkite/scripts/changesets.psm1 defineModuleFromTheChangeSet 'x-pack/winlogbeat' - Write-Output "~~~ Will run tests with env var MODULE=$$Env:MODULE" + Write-Output "~~~ Running tests" Set-Location -Path x-pack/winlogbeat mage build unitTest - env: - MODULE: $MODULE retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -52,7 +53,7 @@ steps: key: "mandatory-win-2016-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2016}" @@ -73,7 +74,7 @@ steps: key: "mandatory-win-2022-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2022}" @@ -99,7 +100,7 @@ steps: key: "extended-win-10-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_10}" @@ -120,7 +121,7 @@ steps: key: "extended-win-11-unit-tests" retry: automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_11}" @@ -141,7 +142,7 @@ steps: key: "extended-win-2019-unit-tests" retry: 
automatic: - - limit: 3 + - limit: 3 agents: provider: "gcp" image: "${IMAGE_WIN_2019}" @@ -170,6 +171,10 @@ steps: - label: ":ubuntu: Packaging Linux" key: "packaging-linux" command: "cd x-pack/winlogbeat && mage package" + retry: + automatic: + - limit: 3 + timeout_in_minutes: 20 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" diff --git a/.ci/jobs/packaging.yml b/.ci/jobs/packaging.yml index 4020c7479d28..8dd8fb6bd85f 100644 --- a/.ci/jobs/packaging.yml +++ b/.ci/jobs/packaging.yml @@ -13,8 +13,8 @@ discover-pr-forks-strategy: 'merge-current' discover-pr-forks-trust: 'permission' discover-pr-origin: 'merge-current' - discover-tags: true - head-filter-regex: '(7\.17|PR-.*)' + discover-tags: false + head-filter-regex: '(PR-.*)' disable-pr-notifications: true notification-context: 'beats-packaging' repo: 'beats' @@ -27,10 +27,6 @@ - tags: ignore-tags-older-than: -1 ignore-tags-newer-than: 30 - - named-branches: - - regex-name: - regex: '7\.17' - case-sensitive: true - change-request: ignore-target-only-changes: true clean: diff --git a/.go-version b/.go-version index f124bfa15544..ae7bbdf047aa 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.9 +1.21.10 diff --git a/.golangci.yml b/.golangci.yml index fc58b57a4d1b..0c2be387e775 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -124,7 +124,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.21.9" + go: "1.21.10" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -142,19 +142,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.21.9" + go: "1.21.10" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.21.9" + go: "1.21.10" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. 
- go: "1.21.9" + go: "1.21.10" gosec: excludes: diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b28a5cd4c4d5..c331311658b4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -144,6 +144,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Restore netflow input on Windows {pull}39024[39024] - Upgrade azure-event-hubs-go and azure-storage-blob-go dependencies. {pull}38861[38861] - Fix concurrency/error handling bugs in the AWS S3 input that could drop data and prevent ingestion of large buckets. {pull}39131[39131] +- Fix EntraID query handling. {issue}39419[39419] {pull}39420[39420] *Heartbeat* @@ -172,6 +173,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* +- Fix error handling in perfmon metrics. {issue}38140[38140] {pull}39404[39404] *Elastic Logging Plugin* @@ -187,7 +189,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Beats will now connect to older Elasticsearch instances by default {pull}36884[36884] - Raise up logging level to warning when attempting to configure beats with unknown fields from autodiscovered events/environments - elasticsearch output now supports `idle_connection_timeout`. {issue}35616[35615] {pull}36843[36843] -- Update to Go 1.21.9. {pulk}38727[38727] +- Update to Go 1.21.10. {pull}39467[39467] - Enable early event encoding in the Elasticsearch output, improving cpu and memory use {pull}38572[38572] - The environment variable `BEATS_ADD_CLOUD_METADATA_PROVIDERS` overrides configured/default `add_cloud_metadata` providers {pull}38669[38669] - Introduce log message for not supported annotations for Hints based autodiscover {pull}38213[38213] @@ -204,6 +206,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add container id to file events (Linux only, eBPF backend). {pull}38328[38328] - Add procfs backend to the `add_session_metadata` processor. 
{pull}38799[38799] - Add process.entity_id, process.group.name and process.group.id in add_process_metadata processor. Make fim module with kprobes backend to always add an appropriately configured add_process_metadata processor to enrich file events {pull}38776[38776] +- Reduce data size for add_session_metadata processor by removing unneeded fields {pull}39500[39500] *Auditbeat* @@ -255,13 +258,16 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add parseDateInTZ value template for the HTTPJSON input. {pull}37738[37738] - Add support for complex event objects in the HTTP Endpoint input. {issue}37910[37910] {pull}38193[38193] - Parse more fields from Elasticsearch slowlogs {pull}38295[38295] -- Update CEL mito extensions to v1.10.0 to add keys/values helper. {pull}38504[38504] +- Update CEL mito extensions to v1.10.0 to add base64 decode functions. {pull}38504[38504] - Add support for Active Directory an entity analytics provider. {pull}37919[37919] - Add AWS AWSHealth metricset. {pull}38370[38370] - Add debugging breadcrumb to logs when writing request trace log. {pull}38636[38636] - added benchmark input {pull}37437[37437] - added benchmark input and discard output {pull}37437[37437] - Ensure all responses sent by HTTP Endpoint are HTML-escaped. {pull}39329[39329] +- Update CEL mito extensions to v1.11.0 to improve type checking. {pull}39460[39460] +- Improve logging of request and response with request trace logging in error conditions. {pull}39455[39455] +- Add HTTP metrics to CEL input. 
{issue}39501[39501] {pull}39503[39503] *Auditbeat* @@ -290,6 +296,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add SSL support to mysql module {pull}37997[37997] - Add SSL support for aerospike module {pull}38126[38126] - Add last_terminated_timestamp metric in kubernetes module {pull}39200[39200] {issue}3802[3802] +- Add pod.status.ready_time and pod.status.reason metrics in kubernetes module {pull}39316[39316] *Metricbeat* diff --git a/Jenkinsfile b/Jenkinsfile index 23a4f3798dfe..04df94f4b256 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -200,16 +200,6 @@ COMMIT=${env.GIT_BASE_COMMIT} VERSION=${env.VERSION}-SNAPSHOT""") archiveArtifacts artifacts: 'packaging.properties' } - cleanup { - // Required to enable the flaky test reporting with GitHub. Workspace exists since the post/always runs earlier - dir("${BASE_DIR}"){ - notifyBuildResult(prComment: true, - slackComment: true, - analyzeFlakey: !isTag(), jobName: getFlakyJobName(withBranch: getFlakyBranch()), - githubIssue: isGitHubIssueEnabled(), - githubLabels: 'Team:Elastic-Agent-Data-Plane') - } - } } } diff --git a/NOTICE.txt b/NOTICE.txt index 3cee98a02e58..3ac32e78f656 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12969,11 +12969,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.9.4 +Version: v0.9.7 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.9.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.9.7/LICENSE: Apache License Version 2.0, January 2004 @@ -15857,11 +15857,11 @@ limitations under the License. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/mito -Version: v1.10.0 +Version: v1.11.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.10.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.11.0/LICENSE: Apache License @@ -25256,11 +25256,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : go.uber.org/zap -Version: v1.26.0 +Version: v1.27.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.26.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.27.0/LICENSE: Copyright (c) 2016-2017 Uber Technologies, Inc. @@ -25285,11 +25285,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.21.0 +Version: v0.22.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.21.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.22.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -25433,11 +25433,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/net -Version: v0.23.0 +Version: v0.24.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.23.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.24.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -38885,6 +38885,36 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/pkcs8 +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/pkcs8@v1.0.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 youmark + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + -------------------------------------------------------------------------------- Dependency : github.com/elazarl/goproxy Version: v0.0.0-20180725130230-947c36da3153 @@ -54355,11 +54385,11 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1. -------------------------------------------------------------------------------- Dependency : go.uber.org/goleak -Version: v1.2.0 +Version: v1.3.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.2.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.3.0/LICENSE: The MIT License (MIT) @@ -54386,11 +54416,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/term -Version: v0.18.0 +Version: v0.19.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.18.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.19.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index f04ddebe8488..3f71118e8e52 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9 +FROM golang:1.21.10 RUN \ apt-get update \ diff --git a/catalog-info.yaml b/catalog-info.yaml index 0e79d2ad8484..89ef7a216f9c 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -1040,9 +1040,7 @@ spec: spec: repository: elastic/beats pipeline_file: ".buildkite/packaging.pipeline.yml" - branch_configuration: "main 8.*" - # TODO enable after packaging backports for release branches - # branch_configuration: "main 8.* 7.17" + branch_configuration: "main 8.* 7.17" cancel_intermediate_builds: false skip_intermediate_builds: false maximum_timeout_in_minutes: 90 diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index ba6e39d9f419..ccd70822f3f0 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.9 as builder +FROM golang:1.21.10 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index 44aa0dc1eced..2b24c9452865 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.9 as builder +FROM golang:1.21.10 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index bda15a5708a6..f76069d22157 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.9 as builder +FROM golang:1.21.10 as builder ENV 
PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/go.mod b/go.mod index 2f4dd3807495..9c9bd467104d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/elastic/beats/v7 -go 1.21.9 +go 1.21.0 + +toolchain go1.21.10 require ( cloud.google.com/go/bigquery v1.55.0 @@ -150,11 +152,11 @@ require ( go.etcd.io/bbolt v1.3.6 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.21.0 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.22.0 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/mod v0.14.0 - golang.org/x/net v0.23.0 + golang.org/x/net v0.24.0 golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.19.0 @@ -204,11 +206,11 @@ require ( github.com/elastic/bayeux v1.0.5 github.com/elastic/ebpfevents v0.6.0 github.com/elastic/elastic-agent-autodiscover v0.6.14 - github.com/elastic/elastic-agent-libs v0.9.4 + github.com/elastic/elastic-agent-libs v0.9.7 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.2 github.com/elastic/go-elasticsearch/v8 v8.13.1 - github.com/elastic/mito v1.10.0 + github.com/elastic/mito v1.11.0 github.com/elastic/tk-btf v0.1.0 github.com/elastic/toutoumomoma v0.0.0-20221026030040-594ef30cb640 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 @@ -289,6 +291,7 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect github.com/elastic/go-windows v1.0.1 // indirect + github.com/elastic/pkcs8 v1.0.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect @@ -381,7 +384,7 @@ require ( go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect - 
golang.org/x/term v0.18.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect diff --git a/go.sum b/go.sum index fefe7776b03c..9a8aa1c2ac3b 100644 --- a/go.sum +++ b/go.sum @@ -555,8 +555,8 @@ github.com/elastic/elastic-agent-autodiscover v0.6.14 h1:0zJYNyv9GKTOiNqCHqEVboP github.com/elastic/elastic-agent-autodiscover v0.6.14/go.mod h1:39/fHHlnyTK6oUNZfAhxJwBTVahO9tNasEIjzsxGMu8= github.com/elastic/elastic-agent-client/v7 v7.8.1 h1:J9wZc/0mUvSEok0X5iR5+n60Jgb+AWooKddb3XgPWqM= github.com/elastic/elastic-agent-client/v7 v7.8.1/go.mod h1:axl1nkdqc84YRFkeJGD9jExKNPUrOrzf3DFo2m653nY= -github.com/elastic/elastic-agent-libs v0.9.4 h1:I6c1NAj3grJ1YZgo+U04w0csMAWGIn6eZTb23Z5MbAI= -github.com/elastic/elastic-agent-libs v0.9.4/go.mod h1:SkMnpLm+tXybBrIWK6f3rcOhrDIztLbYCV46m8gwc8g= +github.com/elastic/elastic-agent-libs v0.9.7 h1:LZdfxbq724Y1zAdE3COp+OIPwU8SquOCLIXpI/twcdQ= +github.com/elastic/elastic-agent-libs v0.9.7/go.mod h1:xhHF9jeWhPzKPtEHN+epKjdiZi0bCbACLxwkp1aHMpc= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.2 h1:/tvTKOt55EerU0WwGFoDhBlyWLgxyv7d8xCbny0bciw= @@ -596,8 +596,10 @@ github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a h1:8WfL/X6fK11 github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/mito v1.10.0 
h1:LhkzBXarU32zAf24k3HOWki0DIoxsNWbVYY6vyHk2RQ= -github.com/elastic/mito v1.10.0/go.mod h1:n7AvUVtYQQXb8fq87FI8z67TNzuhwBV3kHBkDT1qJYQ= +github.com/elastic/mito v1.11.0 h1:thk9uxsTuTFeihMf3I6WLIeZyrBLQYuisWRYRUZl6Ec= +github.com/elastic/mito v1.11.0/go.mod h1:J+wCf4HccW2YoSFmZMGu+d06gN+WmnIlj5ehBqine74= +github.com/elastic/pkcs8 v1.0.0 h1:HhitlUKxhN288kcNcYkjW6/ouvuwJWd9ioxpjnD9jVA= +github.com/elastic/pkcs8 v1.0.0/go.mod h1:ipsZToJfq1MxclVTwpG7U/bgeDtf+0HkUiOxebk95+0= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3 h1:ChPwRVv1RR4a0cxoGjKcyWjTEpxYfm5gydMIzo32cAw= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c= github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= @@ -1765,8 +1767,8 @@ go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1782,8 +1784,8 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= 
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1822,8 +1824,9 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1960,8 +1963,9 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2112,6 +2116,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2123,8 +2128,9 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term 
v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index 4d7d45237da3..f8765612d4bb 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9 +FROM golang:1.21.10 RUN \ apt-get update \ diff --git a/libbeat/docs/security/api-keys.asciidoc b/libbeat/docs/security/api-keys.asciidoc index 1a934b67ce8d..db068f087828 100644 --- a/libbeat/docs/security/api-keys.asciidoc +++ b/libbeat/docs/security/api-keys.asciidoc @@ -33,7 +33,7 @@ POST /_security/api_key "index": [ { "names": ["{beat_default_index_prefix}-*"], - "privileges": ["view_index_metadata", "create_doc"] + "privileges": ["view_index_metadata", "create_doc", "auto_configure"] } ] } diff --git a/libbeat/docs/security/users.asciidoc b/libbeat/docs/security/users.asciidoc index d6849caf0587..846e8bc5937c 100644 --- a/libbeat/docs/security/users.asciidoc +++ b/libbeat/docs/security/users.asciidoc @@ -233,8 +233,9 @@ To grant the required privileges: . Create a *writer role*, called something like +{beat_default_index_prefix}_writer+, that has the following privileges: + -NOTE: The `monitor` cluster privilege and the `create_doc` privilege on -+{beat_default_index_prefix}-*+ indices are required in every configuration. 
+NOTE: The `monitor` cluster privilege and the `create_doc` and `auto_configure` +privileges on +{beat_default_index_prefix}-*+ indices are required in every +configuration. + [options="header"] |==== @@ -259,10 +260,24 @@ ifeval::["{beatname_lc}"=="filebeat"] |Check for ingest pipelines used by modules. Needed when using modules. endif::[] +ifeval::["{beatname_lc}"=="winlogbeat"] +|Cluster +|`read_pipeline` +|Check for ingest pipelines used by {beatname_uc}. +endif::[] + |Index |`create_doc` on +{beat_default_index_prefix}-*+ indices |Write events into {es} + +|Index +|`auto_configure` on +{beat_default_index_prefix}-*+ indices +|Update the datastream mapping. Consider either disabling entirely or adding the +rule `-{beat_default_index_prefix}-*` to the cluster settings +https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation[action.auto_create_index] +to prevent unwanted indices creations from the agents. |==== + ifndef::apm-server[] + Omit any privileges that aren't relevant in your environment. 
diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index 35201b361759..ae9efcf9d01a 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.14.0 :doc-branch: main -:go-version: 1.21.9 +:go-version: 1.21.10 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index fe700b0b6ced..ce43c81c312b 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9 +FROM golang:1.21.10 COPY --from=docker:26.0.0-alpine3.19 /usr/local/bin/docker /usr/local/bin/ RUN \ diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index ca8f11854451..708d996f99d8 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -46811,6 +46811,26 @@ type: keyword -- +*`kubernetes.pod.status.reason`*:: ++ +-- +The reason the pod is in its current state (Evicted, NodeAffinity, NodeLost, Shutdown or UnexpectedAdmissionError) + + +type: keyword + +-- + +*`kubernetes.pod.status.ready_time`*:: ++ +-- +Readiness achieved time in unix timestamp for a pod + + +type: double + +-- + [float] === replicaset diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index 00c6518bf4ce..0a5646a9d75c 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9 +FROM golang:1.21.10 COPY test/main.go main.go diff --git a/metricbeat/module/kubernetes/_meta/test/KSM/ksm.v2.12.0.plain b/metricbeat/module/kubernetes/_meta/test/KSM/ksm.v2.12.0.plain index 35cf6a5ab8c8..35b3f0e3aa02 100644 --- a/metricbeat/module/kubernetes/_meta/test/KSM/ksm.v2.12.0.plain +++ b/metricbeat/module/kubernetes/_meta/test/KSM/ksm.v2.12.0.plain @@ -1057,7 +1057,7 @@ kube_pod_status_container_ready_time{namespace="kube-system",pod="kube-scheduler 
kube_pod_status_container_ready_time{namespace="kube-system",pod="coredns-76f75df574-v8skx",uid="b02dd54a-7c6b-4a45-8f42-cbc00c6fb040"} 1.713862204e+09 # HELP kube_pod_status_reason The pod status reasons # TYPE kube_pod_status_reason gauge -kube_pod_status_reason{namespace="default",pod="web-0",uid="37dd3592-b439-4090-a233-49b8faa2b9b3",reason="Evicted"} 0 +kube_pod_status_reason{namespace="default",pod="web-0",uid="37dd3592-b439-4090-a233-49b8faa2b9b3",reason="Evicted"} 1 kube_pod_status_reason{namespace="default",pod="web-0",uid="37dd3592-b439-4090-a233-49b8faa2b9b3",reason="NodeAffinity"} 0 kube_pod_status_reason{namespace="default",pod="web-0",uid="37dd3592-b439-4090-a233-49b8faa2b9b3",reason="NodeLost"} 0 kube_pod_status_reason{namespace="default",pod="web-0",uid="37dd3592-b439-4090-a233-49b8faa2b9b3",reason="Shutdown"} 0 diff --git a/metricbeat/module/kubernetes/fields.go b/metricbeat/module/kubernetes/fields.go index 1c89d8a62358..36d93f81a8ee 100644 --- a/metricbeat/module/kubernetes/fields.go +++ b/metricbeat/module/kubernetes/fields.go @@ -32,5 +32,5 @@ func init() { // AssetKubernetes returns asset data. // This is the base64 encoded zlib format compressed contents of module/kubernetes. 
func AssetKubernetes() string { - return "eJzsfd9y27iS932eAuWbL/nKo9rr1NapmnHO2eNNJuO1k5mLrS0FIiEJYwrgAKAdndqH38JfgiRAUiIoO7Z0MTWxre4fGo1Gd6PR+Anco/17cF+tECNIIP4GAIFFgd6Di4/uhxdvAMgRzxguBabkPfjbGwAAqP8A7JBgOJPfZqhAkKP3YAPfAMCREJhs+Hvw3xecFxeX4GIrRHnxP/J3W8rEMqNkjTfvwRoWHL0BYI1RkfP3isFPgMAdasGTH7EvJQdGq9L8JABPfq7JmrIdlD8GkOSACygwFzjjgK5BSXMOdpDADcrBau/xWRgKFo0jaCHBEnPEHhBzvwmh6kHWEuDPN9dAE/RkaT9NmdqPL6k2vB38k7LFA2IcU9L4CwvzHu0fKctbv+sBKz8S5Z1BKTkAw2ERBoHJ3CAkh34QDP1VIS4WDHFasQylw3GrKaMcBGm3AfBqNSeGGPkOjIyW6QEARRa8zYqKC8QuFVNewgxdOum868X1gNgqHax/fvlyAzok2zwzmicUheLZIdnlSQQiYikZpZ8Gg0GxAB0WbSw52y9ZlXBp/oHEFjEgtsjyABVHHORsD9qM2mDuMWlzm4DkIya5NPGG+sCU7EpKEBHp2F9ZkmALSV5gsvGF0oumvX9MRCKtpSIJ1tTOzAgzkdxqG4IORXeYbQhKco0NdiIEu0hChNvMd0hsaUJ9VAszQLQzaMoTqqEbcZuqZVsymiHOgxxDihjyOXx6WVktOMo6v7c0c1qtirbd6wzk6uYr4CijJG8j85wLtKNsL7d1nCMiFqt97R52+RaUbAK/1M7hexD7cgPVL/KPACbA8jQYhiA+YCYqWJwSoWE5BHCd8wUtEVlktOpYv0FoDdafq90KMWlxJUGwxgVyf0BZfBq5gEygPIHS3GmFARyTDCkTY5Tb8ggugEcosm0y9UcPiAi+4PhfSE/3YlVl90gs/n90cHT1J8pCste/WI6fgj/kUDQEIBGAHHPB8KpS0Q8mER2KY+fVblZ1vat2UmEea9xcAefHgE2pwj6iIQgBtwUMGO0O567hBsp4630a3BtfRuq0By2yj/CSEh52LY9R6afR5ahE1OB61Fv5FwhmWz3YS+t3qP9Z1cHIpR8wXZrwBZIcOGdwMUYkJ1oidlZHr4+ZFkaNQ1DrsvCgHmYFljKsXewuiCiAKHPND0BuyPd5UulcmqTyC4jMZ5ZXTCWrFtVRutVga50/S1Oq/E6qzg5njA75Vz6S6SLoYiG+w6B/OA7MCa2QszcFFIhk+4bJuQRbzAXdMLgDGlMcf1YxJpfDdEFek3WBN1sxrEqSGqsIwWST2AbYZZgJ/IDUt4Fh1G8TkMjyhZ6EJAahTteaqeUACsUlyB5WORYLtXUmYa/ohbyEJkOGJDSUJ+RpSbaZ1yaLCIjJtOS0J11HL0luWrnjS4F3YSclh6L9iwHX4E4SBB2CXkw6ejMYSuvcfAUVhxsUEERs2D4U9d3oOgwB6qPaGCRlIcLDxIcY+EwCFrrNJhqr2c8IN89+rpzaSblfUYaM8Akk0f2rgRcSKgUTgz0C8ki4WjFQPsDSAaM5WpTBTarGxTNYoHy5LiiM/aH1JUvEsm7+8qgxSPlCDqClKf9tYg9BBSwUdgCLgmZQwFWB5Pd6B1vgHRY/3mhztMYE5Rq+S1vWpvCt/ElUIgCvQUXUd1H+bgGu162vy79Rv+YAMgR2mHO5gcoQRP7hN0n0m/rnNy6gQEv9A2N3kPnaioqt9Eok2xxQAsQWCgXoEogttgez4BEXBVjVbBARmKFiHz4zK+hmfEpwQN6f6EbGK2t6oKmEDxAXMLwwp5vLWPAFxlmFoRgOjNdDJR83WJDBEmZY7IdDPPuXr0E+ep2Nl400xa9BLmrLGS8WLA1DPAU9zf8IRxgg6Tb7RelBvVqiA/Jy3Az1O0bpcElWYyBFtHMOSEpBApCaRxTJciWvxW
i39XDgnGU+t/+5iUQLIjrgZ+4B/+qhP9AJjmgAePZ+8JgxT3CFjUIMecMGxXNziP3pY51jcvDClvDt3V3/AnaHppTdY7LhKJ5TfBkS+UMPFHAkxpu257nOY0M50ZqP6lIJN2gNqyKQyT7s/D889Dp1KhmBCKdG2ezJEOkS2hguZ3coFeuEFTqvI2y8pVSoKhS+5wLtDo4gX4snG5aTH2GdQ+2wjExo9XQh90nCyK+BANI/ZmK0KBDTdygmHTddOWLmRkaaw6ZTlpWfspz81PWpaetSFbdoUar8bzpen+EOjat9/hclCflekzWDXLAqExVDXeLPuwTXpY+YvjelLd/VzVd1zgp4iYiQ1u9cpjsB4Y9Tpish7OD3BAg+uXjiyQqFXbmAjXVcobAqHa4I/g5QSbNtTMGbRW7JVm5fsdxhQq6nWVlaV7IjqHfR8BKsGL1HBOT0Ubox6nJkxdWec2n2ArX4A7Y/cJcoYeWaq3o0sG35Vat2rTMAuTMfh3iGAilXtOYVuA1MwPH4T1oU15qVdh1upyxu4jhPWG7rxqYKjE25pPpmujHMUYuXDt1TKFKwmjudFukazvmmwJREJ8Q3v5r75eRp9Lw7iico1J1Bl/yc918VqtLdrJADQzJA0DW60x2bf9JHGTvvrc8CtpAr/8ZwclW6xt+hDKwQIvbHHaG4IYciMi8PQdaYYL5N4px1xkDXCocaS04J0gc6WPnPJaMbJh03NXOQk/8n9IgySqTvz3SGplcEx44a5nkKQ/KH4wbzXFdRHIsoR6XYJoVkytY15WNhMSQYTmJ0a2DEM7+K+iHgGkfQGS0KlAkavmZ83B1AnCm3LOUuo85YLOXuaAOZC399bhEsxHafFJGjqrAdCCm1aA5kr5FHjujGY7hpHJodKA63CyCYI7bAfLmDXESuu68oLRBs38YfasawrbsxZN3sKiZcQBnvYm5AOApv2iDb9w0Pzu5+2SK/24257OnO7oFZhu43yshDhsAGERk86f489paHMeINDlhZfDkRH9vdgsABmeO4hkb0oncSriQVzQUwlFGW6w25tl8C75D+WQmZwFlVQGbu8Modj2bKBucBhOqbAu7KAMqu3erLka8x42JpWJFIe5rDr2V8sQDlOBUPUPOQP4vfYi3g7IAkiwE8dYKQdw79NQaBvovx2vCrpmM0AeV1Lwz8gEhAHBkt90tBQwjq3RXyVpuOeM66F92tojQWnNPCdo+ZI7l/2ZcuHdfPMZDAjyl9P0dlFm2bFoZKyoTu04J5YC76FtCsDWTWjO7A4xZnWyUcbRswry1jEFLa85rP0v2QhAElY7F4J1UwhwJOn7FfDSUAOacZVrvCIxbb3jXUN29hE3q48+f0gKHOhIA+gzXiGLZhtBQDlVLsWyk1IDsvy7RHav9hyBqVWNfKEHa005/njeKp+o+lZaxIyphSLwK9AB7h0Gq0x47L5G2VfjdtlXyB9J9yVjjhsfFXgv+qEFDnbHiNpbdJPSCBDI8z46hYLwtM7hOCuf0k7ThDXKIxLbdi2wgmD7R4QPkygHEu62R5huTSZ6dgidNrzs83164pl9GenulK251N8va7mvQwTms8fIPVw3S+9WopHyD6tAv26/WHAd5+fmRKzOddIFfpi/Pd8fPd8cgn/d1x5bH+6NfGz7e0wn9zvqXV+aS7pXW+79KBfL7vEoN+vr0xcHuDICG1J5ntZt9fuAreogzhB5XvV42diL2NJGUMMBGIrWGG1B3Czk8Blr6msJeXLt01P53ekxstFdLfYSgT4AEWFQLf/u1br2gQY6G6xNGyGTvu74bTEw3Z5cReuoJ9YZDwHRbi9enYlyfUMXfYdL7KZj8jZ+0f51tsB4vofIHN/3TE8zrurnnFKpE2KG1Yp+liU+N6Lv1rakSxHjbO/6xINPt2jA3HO+mvz9STKL4/DDMYYgJGrnQwPr01ZsWDw9Jg1zsVnxy+g4CRuwh43YIcsc+AQ8zeqxRieDdyKYbGjd
QppxAlzX/IQ4hzDsF+jsohPIfIy0X357hbPKd5+RKcl1d1+vdsTrs6wJ5ji7hDmiS/qsbIcnN13aF4uz2U6YhMCQKUgR1lyP9jew2b5qrmu9UpbvhLmNffeR4dlBOf2J6PJjvAn6V1ODeQTGcyRnWR/JFsA3iFidnGoo4PunXQvnz5J+1aMI+d8/Yf0ACk6Sw5zRjEY90XX8WiNck1O5KzoC7BD1Q7lXCDljMWSmhYo8s2lqfBEy/a8Npxfd9PSTh51xwVrZTP+p87zB3ELdrM4ZRdAc/93UDS5iXn/m49rM/93c793c793c793erPub/bub/bub/blCk493d7Df3d+J60HY+jD/vuqxXSsZ+JAPckO/Lcj1UF4mk33z3JbiSsW0naWflD3gbvgkq54iIARzwT3sV1Qh2NwB7U1ehoTNmHanIHdztMNnM4YYYL8NjE/LFjkSbt9tUDd4SGDCA9obp87hnIeJ1x1ivborwqpj3B4GWuHL0f7hGGU6aRXnI6Lt7r9Ch2d1qhpJZ3Kbuin047l1Q8441iSkbXuJNUSMM1RNvzqKoi4WB/FgLtSmHoykDUruFWBeWsfXlGvbNxmpaHtQnr6XQIzonaILZzojYE8JyoPSdqDxPyOVF7TtQ2h3BO1J4TtWPQnRO150TtOVEbGOH5IY7YOM4PcURGfH6IY/ecH+LgLmOSbEmXiORyLZc0iXLUu4ydCcMASAadMUs912MW7fboDZAMoZ3isFAve+xOm4Cue2s5HMDgmHBqERjU1CjkposvRLIHQUoN6EprDBKo03MpoJjK2hqGpW1NbFZUXCAGOAVr2M6ueYbTQnqqA7M6UWqgTIhkTKpVbUDB1OrAuBNFyjw+pqGYeeoIUup4dxgH4a3PR0w9/5s2nOMu8Dt6SU6ljm6r3JHjlcOFw5l2ndBJt7ttIY/fHAoPoD2IvhugbjiKEXjr/NJHiIX6H4HYDhPYf/UCwTzemyuc3x+JskaomITl2wiKBGQ9FwswEWjTOYg4AozmE+kj5Amm/WaLD2bS/H1Rhy/qJRf77pPRTG59l0LZIjOV4K2Df6Uel5Cze8Ug336itPwFZvd0vb4Ef2dMNe+4qYriMsjY/dp85x2gzFMTyWdXFkig/LKW2BUkhIrbiigOMgb47bdfP+KiQPk7NakofoVOvRFUM1jOLVX1YFBQtI9Qb731YBX0cUOWYqoHPX64sbdLQJLbMp/kYL0BqST9W5WLf2eT9SHrflzvhMEAXd17i3UM0HRj190OWsBXN19VN3SuWY44IDgJJMMO5eBkPcK1yOc/ghsavrnsp+8KDnass/Py9LjrKbPXFWP9jTJGyZ90lcpF0tSSOEhT6gOuDI6hoH8ygyAdzwE1D7yFV8YYPjUJUNICtyg5lzwT+AEFnfCo0kWcb01KZRZcPUVXSTw/li95xUtE8k5zsdGlFI23Io1ksQyzQ3RrzVXPXQWSjT0bUIPt39Xhrs0w6lNfH4LcWUOPajU2RasByXBIoavd3sJgFQkvEPR9JvaS8iD7HMG8wCTOeUjnPhgCjjVcq7yBLRySSGwGV/qKa4gLbybG/E//P7v/48YG0Y6SZh+AKQWNHxS9O3VF/cSWsd6cygJncHwYOLDhBEdnmBxZbT7clmJMHiHuQ9fp4rpxohULKBGrBxKFmCOOWU/H3mkADfVGXvsgeP1Bbyrp6aj3QGgVOd30eryGYHqnJgXd7yY+NOy5QjXBJGu+hIHunRNqFT8GkWouoYTGTLmkYZ0YGTn/b1AtPtRD+9lpxBUlOVa5XVPp9FawCl2CNSy46udTkXtCH0k8t2QPHLtnRgfj9hDeaKpyqzsO4glMvacrbmFhsqYHTvuQGZ2UEfNEWpsEa1od6re8RFl8iocVMxXGrqmaZENTwQrZ0DiwMg8+PZsclObTBWSBjAllw8Z3nCMXDm7nCmMDj4hvm8/eKge9r9pdxnGp7MBN3cHYhYbtsHCEyVcB5gw+QDCGjcJoRR
NzwNAs+mHwKssQ6sbOaZEoLpyvq6KLxiI5qLH++B1DaqjLFh8aEcSetHbBbehR69EOo0orQ+E9Kd4X9Tdw2dT6SZGFubYwBS4gpprGmgPgSAhMNofO57zhU0bJGm8qpjLVDqpKivn2Cry962z9tZvNYFGgAvP2gXwqIXocnr0UfazehtMjP/rYPpFJJzlFW3mbbKfqDg4UW7A8LdkBnb8ru2eWVZEifSRcN+zsbpY1usAT1wnR2eeue5CBt2izABdXjJL/pKuLuGuM+TKjRDBaFEGXLgHk3x7tobFjBN5eyBjo4hJcqCjo4lLGQRf/TihBf7sIa+OBweph6mgis+P10ZrzeUTo59Qbm0dEkCai7Jn3AY8pKVrjOo2F6k84Wsr/5yXMht8PnxIKOC7WJ16MjAimH1wEHlYn+LsmLLc852+F24OnzuH0O/QjS218DahFi7l15lUGRE6/0oSeUgVbN9CXnJmKyeMRB+b2nef5kn1KFejPO0+yDOopdb/O6vB0XUXMmVJv4mRSSZhC2eAzVBWmD+aXJUOcV8EX8FMJT7dIvzGMjpZijvn9KeB+wPx+MlhaiSVdLyXmGaH+Vonf1hLv8ZlknJ9CpjfXHyaL1DSvWY5JQk5HbBrUfPWykMekwlPWgXldwOcrvZLOut9uvK4Jo2sA9QagfHfIkJc6luGtKtmI3mbxZGKe3xsYxIS8k31zxT30d7LCMX+OnqoMqz2FpgSrnj9bjOXusE2cx6ca6GftjajR9b/p6GWjZ5lq1at/Lk3257I3l+smZFY46qmC0NuiLpNQrVCBxOIBMd6NSiccPnzUhIEh3PV0S/kLLhARD7Sodqm83pos0HTrUwhGd+ovf1KR309PXXf4u4YnSYRDr941O65syvDoVYHQFY1DB6FvX8Aso0zdNRTUm5NIUEEZ3KBlVsBII6MR3O80EaCIuLRtR5/AmLqumF5mBcS72ZRTUX+2Knrz+1WPfuohLKcw+AWTHOVWGHFWpkZ5abRmwoq4rQvU7fJKvyqk3BSBMG2oDtiWuwk9+X5WJIAkEcvbzLa+bn6/ihis4ElY9IyrKTK3NLRJuVILw2WpGl/vrtln9/DulnKxxG03xRQ4HJ9LlvAkaXB9E5n1mZLYkrGJqY6r15zxImALprkJeGtvAt6YC/aLxbEXAFOim5adsvXOM2XW2xNuuYXwXnbRtmvKppVEe6vSlgrzREXRMxa9+VDjFZLPqb65UfR6q//xdGXNx+N6snrmEdjoSrVvm0toG0SQbRxnONnmHjU40HMQW99SkVHb/HO7ropib7kNStO7HK0OpP+qqIDJTItHM83LZ7PdNbo1WP9LYR26cdSW0iEINAd9XI1y8HYLWa42KI7yd+Na504JC5oDjV7MC7SsGc3CH6E5CN2X6BJ8k0P9Jsf6TQ72W2T/CAz8iPHpQhDdg0fCgWVZYMSBoF1ft/+fcd9YmgM84nh73HIx1J780uadwdGTPdGtZWJO+Age10QgRmABrm+cypvxh1mi7/oLk0JiOzJLDHz4fBdfAo7l8cPsMIzEFgWF+XIFC0iySWL9RGEOfjF0nEJFmE5Z4nZgHRquOImomxmTVEQ3k4ugtwxkyDZFJyybf4bopIi+rajC8XajYmZdFekce0sxmWffJ4Sh1FC4gr5bKOO6S0ijcGdG0Pb+ThBqNITnfKijoo2Z/dPa8XPuacPniwkRPEHY0bn8Mxbgk8UfQypYRwdzK6EXhxxf6jizLjoN9MA+Dx20mjcCWCvL207yTjPJfs73yf28Bpq4t1cy+oA5prGK8gOOsmpKtdfno4idUKijnGWg8cZBgYE+ENJUFP98T+AOZ1AGzGZ3M+cl4YM1cyqzwirrOemQ4Vea6yqDHKknyGvZqHZ7JAeGS3p/pDHtA16JemcklfbrR0u85/3TPIYe6j816Xy97t8TdQhVh7dl9KrW0BwE3gtyr2CER3lA/dTQ6G6+gopLDTjutEF9N2qmQ4D6qDYGSY
PFd8PEhxj4TALPwrXZRFum2c+ITcd+pLSvKENG5ASS6Dt6DZSQ0FhZ2EigI0FqdUB5hOVM9WHzKVGsjgqMk9pQORYYL1ndodfUZYUH7Ha4zjkyeGHCuL27GyeKR8ruMdnwgKv4siTyhx6oCc9HSKaEG+0rxHVlSrNHr7e1tFQRTs4ewD8pOxkixS2Iq+maJfJUmtVtT+agd6Xi1bUFua0TvlsXqmsbItpHGDzLVfgPXCDjmKpq0YECWnDQMfILFVFdJj0oo0CzJPDSxaO8qahk/EGUwRb69RB4BguUL2PXGPyBlIhl3cfvDhzKjSYizSxdA1NgoaLF6ARjQvOeovcpUxzRHZDU/f2qJysyCM+yMtQfm6QA8w+G0BgwsSb/adFo/3UEnB9Ajc0o/i8AAP//fUVV6Q==" + return "eJzsfU9z2ziT9z2fAuXLm7zlUe05tfVUZZxndrzJZLx2MnPY2lIgEpIwpgAOANrRU/vht/CXIAmQlAjKTiwdpia21f1Do9HobjQaP4F7tH8L7qsVYgQJxF8BILAo0Ftw8cH98OIVADniGcOlwJS8Bf94BQAA9R+AHRIMZ/LbDBUIcvQWbOArADgSApMNfwv++4Lz4uISXGyFKC/+R/5uS5lYZpSs8eYtWMOCo1cArDEqcv5WMfgJELhDLXjyI/al5MBoVZqfBODJzzVZU7aD8scAkhxwAQXmAmcc0DUoac7BDhK4QTlY7T0+C0PBonEELSRYYo7YA2LuNyFUPchaAnx3cw00QU+W9tOUqf34kmrD28G/KFs8IMYxJY2/sDDv0f6Rsrz1ux6w8iNR3hmUkgMwHBZhEJjMDUJy6AfB0N8V4mLBEKcVy1A6HLeaMspBkHYbAK9Wc2KIke/AyGiZHgBQZMHrrKi4QOxSMeUlzNClk86bXlwPiK3Swfr18+cb0CHZ5pnRPKEoFM8OyS5PIhARS8ko/TQYDIoF6LBoY8nZfsmqhEvzTyS2iAGxRZYHqDjiIGd70GbUBnOPSZvbBCQfMMmliTfUB6ZkV1KCiEjH/sqSBFtI8gKTjS+UXjTt/WMiEmktFUmwpnZmRpiJ5FbbEHQousNsQ1CSa2ywEyHYRRIi3Ga+Q2JLE+qjWpgBop1BU55QDd2I21Qt25LRDHEe5BhSxJDP4dPLymrBUdb5vaWZ02pVtO1eZyBXN18ARxkleRuZ51ygHWV7ua3jHBGxWO1r97DLt6BkE/ildg7fgtiXG6h+ln8EMAGWp8EwBPEBM1HB4pQIDcshgOucL2iJyCKjVcf6DUJrsP5U7VaISYsrCYI1LpD7A8ri08gFZALlCZTmTisM4JhkSJkYo9yWR3ABPEKRbZOpP3pARPAFx/9CeroXqyq7R2Lx/6ODo6u/UBaSvf7FcvwU/CmHoiEAiQDkmAuGV5WKfjCJ6FAcO692s6rrXbWTCvNY4+YKOD8GbEoV9hENQQi4LWDAaHc4dw03UMZb79Pg3vgyUqc9aJF9hJeU8LBreYxKP40uRyWiBtej3sq/QDDb6sFeWr9D/c+qDkYu/YDp0oQvkOTAOYOLMSI50RKxszp6fcy0MGocglqXhQf1MCuwlGHtYndBRAFEmWt+AHJDvs+TSufSJJVfQGQ+s7xiKlm1qI7SrQZb6/xZmlLld1J1djhjdMi/8pFMF0EXC/EdBv3DcWBOaIWcvSmgQCTbN0zOJdhiLuiGwR3QmOL4s4oxuRymC/KarAu82YphVZLUWEUIJpvENsAuw0zgB6S+DQyjfpuARJYv9CQkMQh1utZMLQdQKC5B9rDKsViorTMJe0Uv5CU0GTIkoaE8IU9Lss28NllEQEymJac96Tp6SXLTyh1fCrwLOyk5FO1fDLgGd5Ig6BD0YtLRm8FQWufmC6g43KCAIGLD9qGo70bXYQhQH9XGICkLER4mPsTAZxKw0G020VjNfka4efZz5dROyv2KMmSETyCJ7l8NvJBQKZgY7BGQR8LVioHyAZYOGM3Rog
xuUjUunsEC5ct1QWHsD60vWSKWdfOXR41ByhdyAC1N+W8TewgqYKGwA1gUNIMCrgokv9c72ALvsPj+RpujNSYo1/Bd2rI2ha/lT6ISAXgNKqK+i/I3C3C9bn1d/o36NQeQIbDDnMsNVIYg8g+/SqJf1T+/cgEFWuofGLuDzNdWVGylVyLZ5oASILZQKECXQGyxPZgFj7gowKpmg4jADBX78JlZQTfjU4ID8v5INzJeWdMDTSV8gLiA4YU53VzGgi8wzioMxXBgvB4q+bjBggyWMMNiPxzi2b98CfLR62y8bKQpfglyUVvOeLFgaRjiKehp/kc4wgBJt9nPSg/q1RIdkJfjZqjfMUqHS7IaAyminXNAUgoSgNQ8okiWK3kpRruthwPnLPO5/c9NJFoQ0QE/cw/4Nw/9gU5wRAPAs/eDx4x5gitsFGLIGzYonptD7E8f6xyTgx9sCd/e3fUvYHdoStk9JhuO4jnFH0Mif+qBAo7EeNP2PNd5bCgnWvNRXSrhBq1hVQQy2Yed/4eHXqdOJSMQ4dQomz0ZIl1CG8Pl7A6lYp2wQudlhI23lApVhcL3XKDdwRHkS/Fkw3LyI6xzqB2WkQmtni7kPkkY+SUQQPrHTIwWBWL6DsWk46YrR8zcyEhz2HTKsvJTlpOfuj41bV2q4hYtSpX/TcfrE9yhcbXP/6IkId9rsmaQC1ZlomKoS/x5l+C69BHT96a05bu6+aLOWQEvERHS+p3LdCcg/H7KdCWEHfyWAMFHF088WaGwKxewsY4rFFalwxXB3wAqabaNKXizyC3Zyu0rljtMyPU0K0vrSnYE9S4aXoIVo/eIgJw+SjdGXY6suNpzLs1eoBZ/wPYH7hIlrFxzVY8Gti2/atWudQYgd+bjEM9QIOWK1rwCt4EJOB7/SYviWrPSrsPtlMVNHOcJy23d2FSBsSmXVN9MN4Y5avHSoXsKRQpWc6fTIl3DOd8UmJLohPjmV3O/nDyNnndH8QSFujPokp/z/rtCVbqbFXJgSAYIukZ3umPzK32UsfPe+ixgC7nybwwnV6Vr/B3KwAohYn/cEYobcigi8/IQZI0J5tskzllnDHStcKix5JQgfaCDlf9cMrph0nFTMwc5+X9CjyijRPr+TGdoekVw7KhhnqcwJH86bjDPdRXFsYhyVIptUkimbF1TPhYWQ4LhJEa3BkY886uoHwKucQSd0aJAmaDha8bH3QHEmXLLUu4y6ozFUu6ONpC58NfnFsFCbPdJETmqCtuBkFKL5kD2GnnkiG48hpvGodmB4nC7AII5YgvMlzvIReS6+4rSAsH2bfyhZgzbuhtD1s2uYsIFlPEu5gaEo/CqDbJ93/Dg7O7nLfK73ZjLnu7sHphl6H6jjDxkCGwQkcGT7s9jb3kYI97ggJXFlxPxod0tCByQOY5raEQveifhSlLRXABDGWW53pBr+yXwDumflZAJnFUFZOYOr9zxaKZscB5AqL4p4K4MoOzarb4c+RozLpaGFYm0pzn8WsZnC1COU/EANQ/5s/gt1gLODkiyGMBTJwh559BfYxDomxivDb9pOkYTUF73wsAPiATEkdFyvxQ0hKDeXSFvtemI56x70d0qSmPBOS1s95g5kvvnfenScf0cAwn8mNL3c1Rm0bZpYaikTOg+LZgH5qJvAc3aQGbN6A48bnG2VcLRtgHz2jIGIaU9r/kk3Q9JGFAyFot3UgVzKOD0GfvNUAKQc5phtSs8YrHtXUN98xY2oYc7f04PGOpMCOgzWCOOYRtGSzFQKcW+lVIDsvOyTHuk9h+GrFGJda0MYUc7/XneKJ6q/1haxoqkjCn1ItAL4BEOrUZ77LhM3lbpD9NWyRdI/ylnhRMeG38h+O8KAXXOhtdYepvUAxLI8Dgzjor1ssDkPiGY24/SjjPEJRrTciu2jWDyQIsHlC8DGOeyTpZnSC59dgqWOL3mvLu5dk25jPb0TFfa7mySt9/VpIdxWuPhG6wepvOtV0v5ANGnXb
Bfrt8P8PbzI1NiPu8CuUpfnO+On++ORz7p744rj/V7vzZ+vqUV/pvzLa3OJ90trfN9lw7k832XGPTz7Y2B2xsECak9yWw3+/aDq+AtyhB+UPl+1diJ2NtIUsYAE4HYGmZI3SHs/BRg6WsKe3np0l3z0+k9udFSIf0dhjIBHmBRIfD13772igYxFqpLHC2bseP+Zjg90ZBdTuxHV7DPDBK+w0K8PB37/IQ65g6bzlfZ7GfkrP1yvsV2sIjOF9j8T0c8L+PumlesEmmD0oZ1mi42Na7n0r+mRhTrYeP8z4pEs2/H2HC8k/76TD2J4vvDMIMhJmDkSgfj01tjVjw4LA12vVPxyeE7CBi5i4CXLcgR+ww4xOy9SCGGdyOXYmjcSJ1yClHS/Ls8hDjnEOznqBzCc4i8XHR/jrvFc5qXz8F5eVGnf8/mtKsD7Dm2iDukSfKLaowsN1fXHYq320OZjsiUIEAZ2FGG/D+217Bprmq+W53ihr+Eef2d59FBOfGJ7flosgP8WVqHcwPJdCZjVBfJ78k2gBeYmG0s6vigWwftyx//pF0L5rFz3v4dGoA0nSWnGYN4rPvDV7FoTXLNjuQsqEvwA9VOJdyg5YyFEhrW6LKN5WnwxIs2vHZc3/ZTEk7eNUdFK+Wz/ucOcwdxizZzOGVXwHN/N5C0ecm5v1sP63N/t3N/t3N/t3N/t/pz7u927u927u82ZQrO/d1eQn83vidtx+Pow777aoV07GciwD3Jjjz3Y1WBeNrNd0+yGwnrVpJ2Vv6Qt8G7oFKuuAjAEc+Ed3GdUEcjsAd1NToaU/ahmtzB3Q6TzRxOmOECPDYxf+xYpEm7ffXAHaEhA0hPqC6fegYyXmec9cq2KK+KaU8weJkrR++7e4ThlGmkHzkdF+91ehS7O61QUsu7lF3RT6edSyqe8UYxJaNr3EkqpOEaou15VFWRcLDvhEC7Uhi6MhC1a7hVQTlrX55R72ycpuVhbcJ6Oh2Cc6I2iO2cqA0BPCdqz4naw4R8TtSeE7XNIZwTtedE7Rh050TtOVF7TtQGRnh+iCM2jvNDHJERnx/i2D3nhzi4y5gkW9IlIrlcyyVNohz1LmNnwjAAkkFnzFLP9ZhFuz16AyRDaKc4LNTLHrvTJqDr3loOBzA4JpxaBAY1NQq56eILkexBkFIDutIagwTq9FwKKKaytoZhaVsTmxUVF4gBTsEatrNrnuG0kJ7qwKxOlBooEyIZk2pVG1AwtTow7kSRMo+PaShmnjqClDreHcZBeOvzEVPP/6oN57gL/I5eklOpo9sqd+R45XDhcKZdJ3TS7W5byOM3h8IDaA+i7waoG45iBF47v/QRYqH+RyC2wwT2X71AMI/35grn90eirBEqJmH5NoIiAVnPxQJMBNp0DiKOAKP5RPoIeYJpv9nig5k0f5/V4Yt6ycW++2Q0k1vfpVC2yEwleO3gX6nHJeTsXjHItx8pLX+G2T1dry/BPxlTzTtuqqK4DDJ2vzbfeQMo89RE8tmVBRIov6wldgUJoeK2IoqDjAF+//23D7goUP5GTSqKX6FTbwTVDJZzS1U9GBQU7SPUW289WAV93JClmOpBjx9u7O0SkOS2zEc5WG9AKkn/WuXi39hkfci6H9c7YTBAV/feYh0DNN3YdbeDFvDVzRfVDZ1rliMOCE4CybBDOThZj3At8vmP4IaGby776buCgx3r7Lw8Pe56yux1xVh/o4xR8hddpXKRNLUkDtKU+oArg2Mo6J/MIEjHc0DNA2/hlTGGT00ClLTALUrOJc8EfkBBJzyqdBHnW5NSmQVXT9FVEs+P5Ute8RKRvNNcbHQpReOtSCNZLMPsEN1ac9VzV4FkY88G1GD7T3W4azOM+tTXhyB31tCjWo1N0WpAMhxS6Gq3tzBYRcILBH2bib2kPMg+RzAvMIlzHtK594aAYw3XKm9gC4ckEpvBlb7iGuLCm4kx/9P/z+7/uLFBtKOk2QdgSkHje0XvTl1RP7FlrD
enssAZHB8GDmw4wdEZJkdWmw+3pRiTR4j70HW6uG6caMUCSsTqgUQh5ohj1tOxdxpAQ72R1z4IXn/Qm0p6Ouo9EFpFTje9Hq8hmN6pSUH3u4kPDXuuUE0wyZovYaB754RaxQ9BpJpLKKExUy5pWCdGRs7/G1SL9/XQ3jmNuKIkxyq3ayqdXgtWoUuwhgVX/Xwqck/oI4nnluyBY/fM6GDcHsIbTVVudcdBPIGp93TFLSxM1vTAaR8yo5MyYp5Ia5NgTatD/ZqXKItP8bBipsLYNVWTbGgqWCEbGgdW5sGnZ5OD0ny6gCyQMaFs2PiOc+TCwe1cYWzgEfFt89lb5aD3VbvLOC6VHbipOxi70LAdFo4w+SrAnMEHCMawURitaGIOGJpFPwxeZRlC3dg5LRLFhfN1VXTRWCQHNdYfv2NIDXXZ4kMjgtiT1i64DT1qPdphVGllKLwnxfui/gYum1o/KbIw1xamwAXEVNNYcwAcCYHJ5tD5nDd8yihZ403FVKbaQVVJMd9egdd3na2/drMZLApUYN4+kE8lRI/Ds5eij9XbcHrkRx/bJzLpJKdoK2+T7VTdwYFiC5anJTug83dl98yyKlKkj4Trhp3dzbJGF3jiOiE6+9x1DzLwGm0W4OKKUfKfdHURd40xX2aUCEaLIujSJYD8+6M9NHaMwOsLGQNdXIILFQVdXMo46OLfCSXoHxdhbTwwWD1MHU1kdrw+WnM+jwj9nHpj84gI0kSUPfM+4DElRWtcp7FQ/QlHS/n/vITZ8PvhU0IBx8X6xIuREcH0g4vAw+oEf9OE5Zbn/K1we/DUOZx+h35kqY2vAbVoMbfOvMqAyOlXmtBTqmDrBvqSM1MxeTziwNy+8zxfsk+pAv1550mWQT2l7tdZHZ6uq4g5U+pNnEwqCVMoG3yGqsL0wfyyZIjzKvgCfirh6RbpN4bR0VLMMb8/Bdz3mN9PBksrsaTrpcQ8I9TfK/H7WuI9PpOM81PI9Ob6/WSRmuY1yzFJyOmITYOaL14W8phUeMo6MK8L+HylV9JZ99uN1zVhdA2g3gCU7w4Z8lLHMrxVJRvR2yyeTMzzewODmJB3sm+uuIf+TlY45s/RU5VhtafQlGDV82eLsdwdtonz+FQD/aS9ETW6/jcdvWz0LFOtevXPpcn+XPbmct2EzApHPVUQelvUZRKqFSqQWDwgxrtR6YTDhw+aMDCEu55uKX/BBSLigRbVLpXXW5MFmm59CsHoTv3lTyry++mp6w7/0PAkiXDo1btmx5VNGR69KhC6onHoIPTtC5hllKm7hoJ6cxIJKiiDG7TMChhpZDSC+50mAhQRl7bt6BMYU9cV08usgHg3m3Iq6s9WRW/+uOrRTz2E5RQGP2OSo9wKI87K1CgvjdZMWBG3dYG6XV7pV4WUmyIQpg3VAdtyN6En3ztFAkgSsbzNbOvr5o+riMEKnoRFz7iaInNLQ5uUK7UwXJaq8fXumn12D+9uKRdL3HZTTIHD8blkCU+SBtc3kVmfKYktGZuY6rh6zRkvArZgmpuAt/Ym4I25YL9YHHsBMCW6adkpW+88U2a9PeGWWwjv5TDaE94eNO/nYQKwcNcH7eW2fz7gTF1tkz7xu/UaEyz2+l8fKReX4G5bCXVZmDLwhaBvJZJ//y5Xr+RRom6/DShP6JFtkOSC2y2COSaqZUm2xejB3nHDRLedcwl8ZaChEkXHQtoiqGlV6p6htNXbPFGd+ox1iD7UeNHqcyo5b9Qh3+p/PF2l+fG4nqzEfAQ2ulId9eYS2gYRZHv5GU6230oNDvScjdcXh2QgPf/crqui2Ftug9L07qurGoG/KypgMtPi0UzzGN1s179uDdb/UliHLoG1pXQIAs1BVxCgHLzeQpYrn4Gj/M24bsZTIrXmQKN3JQNdhEaz8Edozqb3JboEX+VQv8qxfpWD/RrZPwIDP2J8ujZHt0WScGBZFhhxIGg3/Oj/ZzxckeYAj6g4GLdcDLUnv0d7Z3D0JL
R0t59YXDSCxzURiBFYgOsbp/Jm/GGW6Jv+wqQshR2ZJQbef7qLLwHH8vhhdhhGwr2Cwny5ggUk2SSxfqQwBz8bOk6hIkynLHE7sA4NVy9G1GWZSSqi+/tF0FsGMoqeohOWza8hOikSIlZU4RRIo4hpXRXpHHtLMZln3yeEoWxd+FJDt3bJNfyQRuHOjKDt/Z0g1GgIz/lQR0UbM/untePn3NOGzxcTIniCsKNzH2sswCeLP4ZUsI4O5lZCLw45vvp0Zl10GuiBfR46aDVvBLBW4r2dd59mkv00/JP7eQ00cW+vZPQBc0xjRf4HnC7WlGqvz0cROzRSp2vLQC+UgwIDfUanqSj++Z7AHc6gDJjN7maOsMJnneagbIVVInrSuc9vNNeFHzlSr8LXslEdEEkODJf0/khj2ge8EvX0Syrt1+/IuB5eaVZAuCXYpJKHuqVS1CFUTfdCqeJxcxB4wsk9TBIe5QElbUOju/kCKi414LgDIPXdqJkOAeqj2hgkDdZDDhMfYuAzCbzU12YTTfLbz4hNx36ktK8oQ0bkBJLo04YNlJDQWKXeSKAjQWp1QHmE5Uwle/MpUay0DYyT2lCFHBgvWd002ZTKhQfsdrjO0T74wYRxe3c3ThSPlN1jsuEBV/HHksifeqAmPB8hmRJutK8Q15Upx5Neu3FpqSKcnD2Af1F2MkSKWxBX0zVL5Kk0Cw6fzEHvSsUrNQxyWyd8SjBUajhEtI8weJar8BdcIOOYqgLegZpmcNAx8g8qorpyfVBGgf5V4EcXj/KmopLxB1EGXzWoh8AzWKB8GbtZ4g+kRCzrvkd44FBuNBFpZukamAILFS1GJxgTmvfcQ5gyxRHdAUnd3y96siKD8CwrQ/2xSQowvzCExoCJvbuQFo32X0fA+Q7U2Izi/wIAAP//LBrEPA==" } diff --git a/metricbeat/module/kubernetes/state_pod/_meta/data.json b/metricbeat/module/kubernetes/state_pod/_meta/data.json index 41ba094c58e5..3d0dc66558bb 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/data.json +++ b/metricbeat/module/kubernetes/state_pod/_meta/data.json @@ -6,17 +6,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "etcd-kind-control-plane", + "ip": "10.244.0.173", + "name": "hello-28564555-zdfjz", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } diff --git a/metricbeat/module/kubernetes/state_pod/_meta/fields.yml b/metricbeat/module/kubernetes/state_pod/_meta/fields.yml index 157ec4ec411c..988851b7a6f8 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/fields.yml +++ b/metricbeat/module/kubernetes/state_pod/_meta/fields.yml @@ -25,3 +25,11 @@ type: keyword description: > Kubernetes 
pod scheduled status (true, false, unknown) + - name: reason + type: keyword + description: > + The reason the pod is in its current state (Evicted, NodeAffinity, NodeLost, Shutdown or UnexpectedAdmissionError) + - name: ready_time + type: double + description: > + Readiness achieved time in unix timestamp for a pod diff --git a/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.10.0.plain.expected b/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.10.0.plain.expected index 1a6a2705b727..77419e6472e7 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.10.0.plain.expected +++ b/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.10.0.plain.expected @@ -2,18 +2,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "local-path-storage", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-6bc4bddd6b-6vl7d", + "ip": "10.244.0.5", + "name": "kube-state-metrics-7857446fb4-x78j6", "status": { "phase": "running", "ready": "true", + "ready_time": 1698751133, "scheduled": "true" } }, @@ -39,10 +40,11 @@ "MetricSetFields": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "kube-proxy-2nq9k", + "name": "kube-scheduler-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675684, "scheduled": "true" } }, @@ -60,18 +62,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.8", - "name": "hello-mwrpw", + "ip": "172.18.0.2", + "name": "kube-controller-manager-kind-control-plane", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1698675686, "scheduled": "true" } }, @@ -96,11 +99,12 @@ }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.7", - 
"name": "fluentd-elasticsearch-m2tlp", + "ip": "10.244.0.3", + "name": "coredns-5d78c9869d-crtn9", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675701, "scheduled": "true" } }, @@ -125,8 +129,8 @@ }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.9", - "name": "hello-28312520-d5d5s", + "ip": "10.244.0.8", + "name": "hello-mwrpw", "status": { "phase": "succeeded", "ready": "false", @@ -154,11 +158,12 @@ }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.3", - "name": "coredns-5d78c9869d-crtn9", + "ip": "10.244.0.4", + "name": "coredns-5d78c9869d-gskzq", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675701, "scheduled": "true" } }, @@ -176,18 +181,18 @@ { "RootFields": null, "ModuleFields": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.4", - "name": "coredns-5d78c9869d-gskzq", + "ip": "10.244.0.9", + "name": "hello-28312520-d5d5s", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } }, @@ -213,10 +218,11 @@ "MetricSetFields": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "kube-scheduler-kind-control-plane", + "name": "kindnet-xg6gs", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675698, "scheduled": "true" } }, @@ -241,11 +247,12 @@ }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.5", - "name": "kube-state-metrics-7857446fb4-x78j6", + "ip": "10.244.0.7", + "name": "fluentd-elasticsearch-m2tlp", "status": { "phase": "running", "ready": "true", + "ready_time": 1698751174, "scheduled": "true" } }, @@ -271,10 +278,11 @@ "MetricSetFields": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "kube-controller-manager-kind-control-plane", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675692, 
"scheduled": "true" } }, @@ -300,10 +308,11 @@ "MetricSetFields": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "kindnet-xg6gs", + "name": "kube-apiserver-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675692, "scheduled": "true" } }, @@ -321,18 +330,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "172.18.0.2", - "name": "kube-apiserver-kind-control-plane", + "ip": "10.244.0.6", + "name": "web-0", "status": { "phase": "running", "ready": "true", + "ready_time": 1698751165, "scheduled": "true" } }, @@ -350,18 +360,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "default", + "namespace": "local-path-storage", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.18.0.2", - "ip": "10.244.0.6", - "name": "web-0", + "ip": "10.244.0.2", + "name": "local-path-provisioner-6bc4bddd6b-6vl7d", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675701, "scheduled": "true" } }, @@ -387,10 +398,11 @@ "MetricSetFields": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "etcd-kind-control-plane", + "name": "kube-proxy-2nq9k", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675697, "scheduled": "true" } }, diff --git a/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.11.0.plain.expected b/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.11.0.plain.expected index 9dd8d1e7d41a..13457ad1b494 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.11.0.plain.expected +++ b/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.11.0.plain.expected @@ -9,40 +9,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-controller-manager-kind-control-plane", - "status": { - "phase": "running", - "ready": "true", - "scheduled": "true" - } 
- }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.pod", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "kind-control-plane" - } - }, - "MetricSetFields": { - "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kindnet-98xlt", + "ip": "10.244.0.4", + "name": "coredns-76f75df574-wfchs", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } }, @@ -89,18 +61,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.8", - "name": "web-0", + "ip": "172.21.0.2", + "name": "kube-proxy-45qj9", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862200, "scheduled": "true" } }, @@ -125,11 +98,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.40", - "name": "kube-state-metrics-cbc966f68-9kq2v", + "ip": "172.21.0.2", + "name": "kube-apiserver-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862195, "scheduled": "true" } }, @@ -147,18 +121,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "local-path-storage", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-7577fdbbfb-wmdwd", + "ip": "172.21.0.2", + "name": "kube-scheduler-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862197, "scheduled": "true" } }, @@ -183,11 +158,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.4", - "name": "coredns-76f75df574-wfchs", + "ip": "172.21.0.2", + "name": "kube-controller-manager-kind-control-plane", "status": 
{ "phase": "running", "ready": "true", + "ready_time": 1713862193, "scheduled": "true" } }, @@ -205,18 +181,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "kube-system", + "namespace": "local-path-storage", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "etcd-kind-control-plane", + "ip": "10.244.0.2", + "name": "local-path-provisioner-7577fdbbfb-wmdwd", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } }, @@ -241,11 +218,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-apiserver-kind-control-plane", + "ip": "10.244.0.40", + "name": "kube-state-metrics-cbc966f68-9kq2v", "status": { "phase": "running", "ready": "true", + "ready_time": 1713879978, "scheduled": "true" } }, @@ -271,10 +249,11 @@ "MetricSetFields": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kube-proxy-45qj9", + "name": "kindnet-98xlt", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862201, "scheduled": "true" } }, @@ -357,11 +336,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.85", - "name": "mycurlpod", + "ip": "10.244.0.6", + "name": "fluentd-elasticsearch-l8b6x", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862304, "scheduled": "true" } }, @@ -415,11 +395,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.3", - "name": "coredns-76f75df574-v8skx", + "ip": "10.244.0.85", + "name": "mycurlpod", "status": { "phase": "running", "ready": "true", + "ready_time": 1713881941, "scheduled": "true" } }, @@ -444,11 +425,42 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.6", - "name": "fluentd-elasticsearch-l8b6x", + "ip": "172.21.0.2", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862189, + "scheduled": "true" + } + }, + "Index": "", + "ID": "", + 
"Namespace": "kubernetes.pod", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": null, + "ModuleFields": { + "namespace": "default", + "node": { + "name": "kind-control-plane" + } + }, + "MetricSetFields": { + "host_ip": "172.21.0.2", + "ip": "10.244.0.8", + "name": "web-0", + "status": { + "phase": "running", + "ready": "true", + "ready_time": 1713862311, "scheduled": "true" } }, @@ -473,11 +485,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-scheduler-kind-control-plane", + "ip": "10.244.0.3", + "name": "coredns-76f75df574-v8skx", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } }, diff --git a/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.12.0.plain.expected b/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.12.0.plain.expected index ff6eac9e4f05..dbe4a449707e 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.12.0.plain.expected +++ b/metricbeat/module/kubernetes/state_pod/_meta/test/ksm.v2.12.0.plain.expected @@ -9,11 +9,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-controller-manager-kind-control-plane", + "ip": "10.244.0.4", + "name": "coredns-76f75df574-wfchs", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } }, @@ -39,10 +40,11 @@ "MetricSetFields": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kindnet-98xlt", + "name": "kube-proxy-45qj9", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862200, "scheduled": "true" } }, @@ -60,18 +62,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.8", - "name": "web-0", + "ip": 
"172.21.0.2", + "name": "kube-apiserver-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862195, "scheduled": "true" } }, @@ -147,18 +150,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "local-path-storage", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-7577fdbbfb-wmdwd", + "ip": "172.21.0.2", + "name": "kube-scheduler-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862197, "scheduled": "true" } }, @@ -183,11 +187,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.174", - "name": "kube-state-metrics-5bcd4898-bntgt", + "ip": "172.21.0.2", + "name": "kube-controller-manager-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862193, "scheduled": "true" } }, @@ -205,18 +210,18 @@ { "RootFields": null, "ModuleFields": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.4", - "name": "coredns-76f75df574-wfchs", + "ip": "10.244.0.173", + "name": "hello-28564555-zdfjz", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } }, @@ -234,18 +239,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "default", + "namespace": "local-path-storage", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.173", - "name": "hello-28564555-zdfjz", + "ip": "10.244.0.2", + "name": "local-path-provisioner-7577fdbbfb-wmdwd", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } }, @@ -270,11 +276,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": 
"etcd-kind-control-plane", + "ip": "10.244.0.174", + "name": "kube-state-metrics-5bcd4898-bntgt", "status": { "phase": "running", "ready": "true", + "ready_time": 1713873343, "scheduled": "true" } }, @@ -300,10 +307,11 @@ "MetricSetFields": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kube-apiserver-kind-control-plane", + "name": "kindnet-98xlt", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862201, "scheduled": "true" } }, @@ -321,18 +329,18 @@ { "RootFields": null, "ModuleFields": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-proxy-45qj9", + "ip": "10.244.0.7", + "name": "hello-wlb5q", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } }, @@ -350,18 +358,19 @@ { "RootFields": null, "ModuleFields": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.7", - "name": "hello-wlb5q", + "ip": "10.244.0.6", + "name": "fluentd-elasticsearch-l8b6x", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862304, "scheduled": "true" } }, @@ -379,18 +388,18 @@ { "RootFields": null, "ModuleFields": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" } }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.3", - "name": "coredns-76f75df574-v8skx", + "ip": "10.244.0.176", + "name": "hello-28564556-gkqsk", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } }, @@ -415,11 +424,13 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.176", - "name": "hello-28564556-gkqsk", + "ip": "10.244.0.8", + "name": "web-0", "status": { - 
"phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862311, + "reason": "evicted", "scheduled": "true" } }, @@ -444,11 +455,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "10.244.0.6", - "name": "fluentd-elasticsearch-l8b6x", + "ip": "172.21.0.2", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862189, "scheduled": "true" } }, @@ -473,11 +485,12 @@ }, "MetricSetFields": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-scheduler-kind-control-plane", + "ip": "10.244.0.3", + "name": "coredns-76f75df574-v8skx", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } }, diff --git a/metricbeat/module/kubernetes/state_pod/_meta/testdata/docs.plain-expected.json b/metricbeat/module/kubernetes/state_pod/_meta/testdata/docs.plain-expected.json index 2f6d43d7933f..92a365f6f2c6 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/testdata/docs.plain-expected.json +++ b/metricbeat/module/kubernetes/state_pod/_meta/testdata/docs.plain-expected.json @@ -6,17 +6,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "etcd-kind-control-plane", + "ip": "10.244.0.173", + "name": "hello-28564555-zdfjz", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -37,17 +37,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.173", - "name": "hello-28564555-zdfjz", + "ip": "10.244.0.174", + "name": "kube-state-metrics-5bcd4898-bntgt", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + 
"ready_time": 1713873343, "scheduled": "true" } } @@ -74,11 +75,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.4", - "name": "coredns-76f75df574-wfchs", + "ip": "172.21.0.2", + "name": "kube-apiserver-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862195, "scheduled": "true" } } @@ -106,10 +108,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kindnet-98xlt", + "name": "kube-controller-manager-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862193, "scheduled": "true" } } @@ -130,17 +133,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "local-path-storage", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-proxy-45qj9", + "ip": "10.244.0.2", + "name": "local-path-provisioner-7577fdbbfb-wmdwd", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -161,17 +165,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.172", - "name": "hello-28564554-llbsx", + "ip": "10.244.0.3", + "name": "coredns-76f75df574-v8skx", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -192,17 +197,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.3", - "name": "coredns-76f75df574-v8skx", + "ip": "10.244.0.172", + "name": "hello-28564554-llbsx", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -260,11 +265,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": 
"10.244.0.6", - "name": "fluentd-elasticsearch-l8b6x", + "ip": "172.21.0.2", + "name": "kube-scheduler-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862197, "scheduled": "true" } } @@ -291,11 +297,11 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-scheduler-kind-control-plane", + "ip": "10.244.0.177", + "name": "mycurlpod", "status": { "phase": "running", - "ready": "true", + "ready": "false", "scheduled": "true" } } @@ -322,11 +328,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.177", - "name": "mycurlpod", + "ip": "10.244.0.6", + "name": "fluentd-elasticsearch-l8b6x", "status": { "phase": "running", - "ready": "false", + "ready": "true", + "ready_time": 1713862304, "scheduled": "true" } } @@ -347,17 +354,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.8", - "name": "web-0", + "ip": "10.244.0.4", + "name": "coredns-76f75df574-wfchs", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -385,10 +393,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kube-controller-manager-kind-control-plane", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862189, "scheduled": "true" } } @@ -409,17 +418,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.176", - "name": "hello-28564556-gkqsk", + "ip": "172.21.0.2", + "name": "kindnet-98xlt", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862201, "scheduled": "true" } } @@ -440,17 +450,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": 
"local-path-storage", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-7577fdbbfb-wmdwd", + "ip": "10.244.0.8", + "name": "web-0", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862311, "scheduled": "true" } } @@ -471,17 +482,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-apiserver-kind-control-plane", + "ip": "10.244.0.176", + "name": "hello-28564556-gkqsk", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -508,11 +519,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.174", - "name": "kube-state-metrics-5bcd4898-bntgt", + "ip": "172.21.0.2", + "name": "kube-proxy-45qj9", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862200, "scheduled": "true" } } diff --git a/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.10.0.plain-expected.json b/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.10.0.plain-expected.json index 74de41883552..dbd86f44e726 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.10.0.plain-expected.json +++ b/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.10.0.plain-expected.json @@ -6,17 +6,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "local-path-storage", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "10.244.0.6", - "name": "web-0", + "ip": "10.244.0.2", + "name": "local-path-provisioner-6bc4bddd6b-6vl7d", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675701, "scheduled": "true" } } @@ -48,6 +49,7 @@ "status": { "phase": "running", "ready": "true", + "ready_time": 1698675701, 
"scheduled": "true" } } @@ -68,17 +70,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "10.244.0.9", - "name": "hello-28312520-d5d5s", + "ip": "10.244.0.7", + "name": "fluentd-elasticsearch-m2tlp", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1698751174, "scheduled": "true" } } @@ -99,17 +102,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "172.18.0.2", - "name": "kube-apiserver-kind-control-plane", + "ip": "10.244.0.9", + "name": "hello-28312520-d5d5s", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -130,17 +133,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "10.244.0.8", - "name": "hello-mwrpw", + "ip": "172.18.0.2", + "name": "kube-scheduler-kind-control-plane", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1698675684, "scheduled": "true" } } @@ -161,17 +165,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "172.18.0.2", - "name": "kube-controller-manager-kind-control-plane", + "ip": "10.244.0.8", + "name": "hello-mwrpw", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -199,10 +203,11 @@ "pod": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "etcd-kind-control-plane", + "name": "kube-apiserver-kind-control-plane", "status": 
{ "phase": "running", "ready": "true", + "ready_time": 1698675692, "scheduled": "true" } } @@ -223,17 +228,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "local-path-storage", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-6bc4bddd6b-6vl7d", + "ip": "172.18.0.2", + "name": "kindnet-xg6gs", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675698, "scheduled": "true" } } @@ -265,6 +271,7 @@ "status": { "phase": "running", "ready": "true", + "ready_time": 1698751133, "scheduled": "true" } } @@ -292,10 +299,11 @@ "pod": { "host_ip": "172.18.0.2", "ip": "172.18.0.2", - "name": "kube-scheduler-kind-control-plane", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675692, "scheduled": "true" } } @@ -322,11 +330,12 @@ }, "pod": { "host_ip": "172.18.0.2", - "ip": "172.18.0.2", - "name": "kindnet-xg6gs", + "ip": "10.244.0.4", + "name": "coredns-5d78c9869d-gskzq", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675701, "scheduled": "true" } } @@ -358,6 +367,7 @@ "status": { "phase": "running", "ready": "true", + "ready_time": 1698675697, "scheduled": "true" } } @@ -378,17 +388,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.18.0.2", - "ip": "10.244.0.7", - "name": "fluentd-elasticsearch-m2tlp", + "ip": "10.244.0.6", + "name": "web-0", "status": { "phase": "running", "ready": "true", + "ready_time": 1698751165, "scheduled": "true" } } @@ -415,11 +426,12 @@ }, "pod": { "host_ip": "172.18.0.2", - "ip": "10.244.0.4", - "name": "coredns-5d78c9869d-gskzq", + "ip": "172.18.0.2", + "name": "kube-controller-manager-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1698675686, "scheduled": "true" } } diff 
--git a/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.11.0.plain-expected.json b/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.11.0.plain-expected.json index cab0d7b67c3c..c43a3d605993 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.11.0.plain-expected.json +++ b/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.11.0.plain-expected.json @@ -44,10 +44,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "etcd-kind-control-plane", + "name": "kube-apiserver-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862195, "scheduled": "true" } } @@ -74,11 +75,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.4", - "name": "coredns-76f75df574-wfchs", + "ip": "172.21.0.2", + "name": "kube-controller-manager-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862193, "scheduled": "true" } } @@ -99,17 +101,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "local-path-storage", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.85", - "name": "mycurlpod", + "ip": "10.244.0.2", + "name": "local-path-provisioner-7577fdbbfb-wmdwd", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -130,17 +133,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kindnet-98xlt", + "ip": "10.244.0.81", + "name": "hello-28564697-8dmzw", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -161,17 +164,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": 
"172.21.0.2", - "ip": "10.244.0.81", - "name": "hello-28564697-8dmzw", + "ip": "10.244.0.3", + "name": "coredns-76f75df574-v8skx", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -192,17 +196,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-proxy-45qj9", + "ip": "10.244.0.7", + "name": "hello-wlb5q", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -229,11 +233,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.40", - "name": "kube-state-metrics-cbc966f68-9kq2v", + "ip": "172.21.0.2", + "name": "kube-scheduler-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862197, "scheduled": "true" } } @@ -254,17 +259,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.3", - "name": "coredns-76f75df574-v8skx", + "ip": "10.244.0.84", + "name": "hello-28564698-tv8jb", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -285,17 +290,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.7", - "name": "hello-wlb5q", + "ip": "10.244.0.6", + "name": "fluentd-elasticsearch-l8b6x", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862304, "scheduled": "true" } } @@ -322,11 +328,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.6", - "name": 
"fluentd-elasticsearch-l8b6x", + "ip": "10.244.0.4", + "name": "coredns-76f75df574-wfchs", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -354,10 +361,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kube-scheduler-kind-control-plane", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862189, "scheduled": "true" } } @@ -378,17 +386,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.84", - "name": "hello-28564698-tv8jb", + "ip": "172.21.0.2", + "name": "kindnet-98xlt", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862201, "scheduled": "true" } } @@ -420,6 +429,7 @@ "status": { "phase": "running", "ready": "true", + "ready_time": 1713862311, "scheduled": "true" } } @@ -446,11 +456,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-controller-manager-kind-control-plane", + "ip": "10.244.0.85", + "name": "mycurlpod", "status": { "phase": "running", "ready": "true", + "ready_time": 1713881941, "scheduled": "true" } } @@ -471,17 +482,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "local-path-storage", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-7577fdbbfb-wmdwd", + "ip": "10.244.0.40", + "name": "kube-state-metrics-cbc966f68-9kq2v", "status": { "phase": "running", "ready": "true", + "ready_time": 1713879978, "scheduled": "true" } } @@ -509,10 +521,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kube-apiserver-kind-control-plane", + "name": "kube-proxy-45qj9", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862200, 
"scheduled": "true" } } diff --git a/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.12.0.plain-expected.json b/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.12.0.plain-expected.json index 2f6d43d7933f..884759e3d5f3 100644 --- a/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.12.0.plain-expected.json +++ b/metricbeat/module/kubernetes/state_pod/_meta/testdata/ksm.v2.12.0.plain-expected.json @@ -6,17 +6,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "etcd-kind-control-plane", + "ip": "10.244.0.173", + "name": "hello-28564555-zdfjz", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -37,17 +37,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.173", - "name": "hello-28564555-zdfjz", + "ip": "10.244.0.174", + "name": "kube-state-metrics-5bcd4898-bntgt", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713873343, "scheduled": "true" } } @@ -74,11 +75,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.4", - "name": "coredns-76f75df574-wfchs", + "ip": "172.21.0.2", + "name": "kube-apiserver-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862195, "scheduled": "true" } } @@ -106,10 +108,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kindnet-98xlt", + "name": "kube-controller-manager-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862193, "scheduled": "true" } } @@ -130,17 +133,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": 
"local-path-storage", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-proxy-45qj9", + "ip": "10.244.0.2", + "name": "local-path-provisioner-7577fdbbfb-wmdwd", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -161,17 +165,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.172", - "name": "hello-28564554-llbsx", + "ip": "10.244.0.3", + "name": "coredns-76f75df574-v8skx", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -192,17 +197,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.3", - "name": "coredns-76f75df574-v8skx", + "ip": "10.244.0.172", + "name": "hello-28564554-llbsx", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -260,11 +265,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.6", - "name": "fluentd-elasticsearch-l8b6x", + "ip": "172.21.0.2", + "name": "kube-scheduler-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862197, "scheduled": "true" } } @@ -291,11 +297,11 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-scheduler-kind-control-plane", + "ip": "10.244.0.177", + "name": "mycurlpod", "status": { "phase": "running", - "ready": "true", + "ready": "false", "scheduled": "true" } } @@ -322,11 +328,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.177", - "name": "mycurlpod", + "ip": "10.244.0.6", + "name": "fluentd-elasticsearch-l8b6x", "status": { "phase": "running", - "ready": 
"false", + "ready": "true", + "ready_time": 1713862304, "scheduled": "true" } } @@ -347,17 +354,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.8", - "name": "web-0", + "ip": "10.244.0.4", + "name": "coredns-76f75df574-wfchs", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862204, "scheduled": "true" } } @@ -385,10 +393,11 @@ "pod": { "host_ip": "172.21.0.2", "ip": "172.21.0.2", - "name": "kube-controller-manager-kind-control-plane", + "name": "etcd-kind-control-plane", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862189, "scheduled": "true" } } @@ -409,17 +418,18 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.176", - "name": "hello-28564556-gkqsk", + "ip": "172.21.0.2", + "name": "kindnet-98xlt", "status": { - "phase": "succeeded", - "ready": "false", + "phase": "running", + "ready": "true", + "ready_time": 1713862201, "scheduled": "true" } } @@ -440,17 +450,17 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "local-path-storage", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.2", - "name": "local-path-provisioner-7577fdbbfb-wmdwd", + "ip": "10.244.0.176", + "name": "hello-28564556-gkqsk", "status": { - "phase": "running", - "ready": "true", + "phase": "succeeded", + "ready": "false", "scheduled": "true" } } @@ -471,17 +481,19 @@ "module": "kubernetes" }, "kubernetes": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "kind-control-plane" }, "pod": { "host_ip": "172.21.0.2", - "ip": "172.21.0.2", - "name": "kube-apiserver-kind-control-plane", + "ip": "10.244.0.8", + "name": "web-0", "status": { "phase": "running", 
"ready": "true", + "ready_time": 1713862311, + "reason": "evicted", "scheduled": "true" } } @@ -508,11 +520,12 @@ }, "pod": { "host_ip": "172.21.0.2", - "ip": "10.244.0.174", - "name": "kube-state-metrics-5bcd4898-bntgt", + "ip": "172.21.0.2", + "name": "kube-proxy-45qj9", "status": { "phase": "running", "ready": "true", + "ready_time": 1713862200, "scheduled": "true" } } diff --git a/metricbeat/module/kubernetes/state_pod/state_pod.go b/metricbeat/module/kubernetes/state_pod/state_pod.go index 4ec1f2538937..7f6fb13eeca6 100644 --- a/metricbeat/module/kubernetes/state_pod/state_pod.go +++ b/metricbeat/module/kubernetes/state_pod/state_pod.go @@ -27,10 +27,12 @@ import ( // mapping stores the state metrics we want to fetch and will be used by this metricset var mapping = &p.MetricsMapping{ Metrics: map[string]p.MetricMap{ - "kube_pod_info": p.InfoMetric(), - "kube_pod_status_phase": p.LabelMetric("status.phase", "phase", p.OpLowercaseValue()), - "kube_pod_status_ready": p.LabelMetric("status.ready", "condition", p.OpLowercaseValue()), - "kube_pod_status_scheduled": p.LabelMetric("status.scheduled", "condition", p.OpLowercaseValue()), + "kube_pod_info": p.InfoMetric(), + "kube_pod_status_phase": p.LabelMetric("status.phase", "phase", p.OpLowercaseValue()), + "kube_pod_status_ready": p.LabelMetric("status.ready", "condition", p.OpLowercaseValue()), + "kube_pod_status_scheduled": p.LabelMetric("status.scheduled", "condition", p.OpLowercaseValue()), + "kube_pod_status_reason": p.LabelMetric("status.reason", "reason", p.OpLowercaseValue()), + "kube_pod_status_ready_time": p.Metric("status.ready_time"), }, Labels: map[string]p.LabelMap{ diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index 6459dc6bf6a8..f3cab807dfd5 100644 --- a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.21.9 AS build-env 
+FROM golang:1.21.10 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git /nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index 62e7a752fbf7..9dea6777c572 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,5 +1,5 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:1.21.9 +FROM golang:1.21.10 RUN apt-get install curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 diff --git a/metricbeat/module/windows/perfmon/data.go b/metricbeat/module/windows/perfmon/data.go index 9add5c03896a..0391266e65a5 100644 --- a/metricbeat/module/windows/perfmon/data.go +++ b/metricbeat/module/windows/perfmon/data.go @@ -20,6 +20,7 @@ package perfmon import ( + "errors" "fmt" "regexp" "strconv" @@ -48,7 +49,7 @@ func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Eve // The counter has a negative value or the counter was successfully found, but the data returned is not valid. // This error can occur if the counter value is less than the previous value. (Because counter values always increment, the counter value rolls over to zero when it reaches its maximum value.) // This is not an error that stops the application from running successfully and a positive counter value should be retrieved in the later calls. 
- if val.Err.Error == pdh.PDH_CALC_NEGATIVE_VALUE || val.Err.Error == pdh.PDH_INVALID_DATA { + if errors.Is(val.Err.Error, pdh.PDH_CALC_NEGATIVE_VALUE) || errors.Is(val.Err.Error, pdh.PDH_INVALID_DATA) { re.log.Debugw("Counter value retrieval returned", "error", val.Err.Error, "cstatus", pdh.PdhErrno(val.Err.CStatus), logp.Namespace("perfmon"), "query", counterPath) continue @@ -69,7 +70,9 @@ func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Eve if _, ok := eventMap[eventKey]; !ok { eventMap[eventKey] = &mb.Event{ MetricSetFields: mapstr.M{}, - Error: fmt.Errorf("failed on query=%v: %w", counterPath, val.Err.Error), + } + if val.Err.Error != nil { + eventMap[eventKey].Error = fmt.Errorf("failed on query=%v: %w", counterPath, val.Err.Error) } if val.Instance != "" { // will ignore instance index @@ -93,9 +96,11 @@ func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Eve } } // Write the values into the map. - var events []mb.Event + events := make([]mb.Event, len(eventMap)) + iter := 0 for _, val := range eventMap { - events = append(events, *val) + events[iter] = *val + iter++ } return events } @@ -111,7 +116,7 @@ func (re *Reader) groupToSingleEvent(counters map[string][]pdh.CounterValue) mb. // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). 
if val.Err.Error != nil { - if val.Err.Error == pdh.PDH_CALC_NEGATIVE_VALUE || val.Err.Error == pdh.PDH_INVALID_DATA { + if errors.Is(val.Err.Error, pdh.PDH_CALC_NEGATIVE_VALUE) || errors.Is(val.Err.Error, pdh.PDH_INVALID_DATA) { re.log.Debugw("Counter value retrieval returned", "error", val.Err.Error, "cstatus", pdh.PdhErrno(val.Err.CStatus), logp.Namespace("perfmon"), "query", counterPath) continue diff --git a/metricbeat/module/windows/perfmon/data_test.go b/metricbeat/module/windows/perfmon/data_test.go index 2e9f15e2de7e..9c4691216b34 100644 --- a/metricbeat/module/windows/perfmon/data_test.go +++ b/metricbeat/module/windows/perfmon/data_test.go @@ -28,6 +28,67 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" ) +func TestGroupErrors(t *testing.T) { + reader := Reader{ + config: Config{ + GroupMeasurements: true, + }, + query: pdh.Query{}, + log: nil, + counters: []PerfCounter{ + { + QueryField: "datagrams_sent_per_sec", + QueryName: `\UDPv4\Datagrams Sent/sec`, + Format: "float", + ObjectName: "UDPv4", + ObjectField: "object", + ChildQueries: []string{`\UDPv4\Datagrams Sent/sec`}, + }, + { + QueryField: "%_processor_time", + QueryName: `\Processor Information(_Total)\% Processor Time`, + Format: "float", + ObjectName: "Processor Information", + ObjectField: "object", + InstanceName: "_Total", + InstanceField: "instance", + ChildQueries: []string{`\Processor Information(_Total)\% Processor Time`}, + }, + { + QueryField: "current_disk_queue_length", + QueryName: `\PhysicalDisk(_Total)\Current Disk Queue Length`, + Format: "float", + ObjectName: "PhysicalDisk", + ObjectField: "object", + InstanceName: "_Total", + InstanceField: "instance", + ChildQueries: []string{`\PhysicalDisk(_Total)\Current Disk Queue Length`}, + }, + }, + } + + counters := map[string][]pdh.CounterValue{ + `\UDPv4\Datagrams Sent/sec`: { + {Instance: "", Measurement: 23}, + }, + `\Processor Information(_Total)\% Processor Time`: { + {Instance: "_Total", Measurement: 11}, + }, + 
`\PhysicalDisk(_Total)\Current Disk Queue Length`: { + {Instance: "_Total", Measurement: 20}, + }, + } + + events := reader.groupToEvents(counters) + assert.NotNil(t, events) + assert.Equal(t, 3, len(events)) + + for _, event := range events { + assert.NoError(t, event.Error) + } + +} + func TestGroupToEvents(t *testing.T) { reader := Reader{ config: Config{ diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 686f97b947f4..912cd87cc43c 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9 +FROM golang:1.21.10 RUN \ apt-get update \ diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index afa963af1551..e84c20b0ea01 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0-b7cc7ef3-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0-177234da-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.15.0-b7cc7ef3-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.15.0-177234da-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.15.0-b7cc7ef3-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.15.0-177234da-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" diff --git a/x-pack/auditbeat/processors/sessionmd/types/process.go b/x-pack/auditbeat/processors/sessionmd/types/process.go index daf989ef3cd5..8f52a9c5aa59 100644 --- a/x-pack/auditbeat/processors/sessionmd/types/process.go +++ 
b/x-pack/auditbeat/processors/sessionmd/types/process.go @@ -356,18 +356,6 @@ func (p *Process) ToMap() mapstr.M { "pid": p.PID, "vpid": p.Vpid, "args": p.Args, - "thread": mapstr.M{ - "capabilities": mapstr.M{ - "permitted": p.Thread.Capabilities.Permitted, - "effective": p.Thread.Capabilities.Effective, - }, - }, - "tty": mapstr.M{ - "char_device": mapstr.M{ - "major": p.TTY.CharDevice.Major, - "minor": p.TTY.CharDevice.Minor, - }, - }, "parent": mapstr.M{ "entity_id": p.Parent.EntityID, "executable": p.Parent.Executable, @@ -384,12 +372,6 @@ func (p *Process) ToMap() mapstr.M { }, "pid": p.Parent.PID, "args": p.Parent.Args, - "thread": mapstr.M{ - "capabilities": mapstr.M{ - "permitted": p.Parent.Thread.Capabilities.Permitted, - "effective": p.Parent.Thread.Capabilities.Effective, - }, - }, }, "group_leader": mapstr.M{ "entity_id": p.GroupLeader.EntityID, diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc b/x-pack/filebeat/docs/inputs/input-cel.asciidoc index 7be120941c9f..7ec869e42cc7 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] :type: cel -:mito_version: v1.10.0 +:mito_version: v1.11.0 :mito_docs: https://pkg.go.dev/github.com/elastic/mito@{mito_version} [id="{beatname_lc}-input-{type}"] @@ -765,15 +765,36 @@ observe the activity of the input. [options="header"] |======= -| Metric | Description -| `resource` | URL or path of the input resource. -| `cel_executions` | Number times the CEL program has been executed. -| `batches_received_total` | Number of event arrays received. -| `events_received_total` | Number of events received. -| `batches_published_total` | Number of event arrays published. -| `events_published_total` | Number of events published. -| `cel_processing_time` | Histogram of the elapsed successful CEL program processing times in nanoseconds. 
-| `batch_processing_time` | Histogram of the elapsed successful batch processing times in nanoseconds (time of receipt to time of ACK for non-empty batches). +| Metric | Description +| `resource` | URL or path of the input resource. +| `cel_executions` | Number times the CEL program has been executed. +| `batches_received_total` | Number of event arrays received. +| `events_received_total` | Number of events received. +| `batches_published_total` | Number of event arrays published. +| `events_published_total` | Number of events published. +| `cel_processing_time` | Histogram of the elapsed successful CEL program processing times in nanoseconds. +| `batch_processing_time` | Histogram of the elapsed successful batch processing times in nanoseconds (time of receipt to time of ACK for non-empty batches). +| `http_request_total` | Total number of processed requests. +| `http_request_errors_total` | Total number of request errors. +| `http_request_delete_total` | Total number of `DELETE` requests. +| `http_request_get_total` | Total number of `GET` requests. +| `http_request_head_total` | Total number of `HEAD` requests. +| `http_request_options_total` | Total number of `OPTIONS` requests. +| `http_request_patch_total` | Total number of `PATCH` requests. +| `http_request_post_total` | Total number of `POST` requests. +| `http_request_put_total` | Total number of `PUT` requests. +| `http_request_body_bytes_total` | Total of the requests body size. +| `http_request_body_bytes` | Histogram of the requests body size. +| `http_response_total` | Total number of responses received. +| `http_response_errors_total` | Total number of response errors. +| `http_response_1xx_total` | Total number of `1xx` responses. +| `http_response_2xx_total` | Total number of `2xx` responses. +| `http_response_3xx_total` | Total number of `3xx` responses. +| `http_response_4xx_total` | Total number of `4xx` responses. +| `http_response_5xx_total` | Total number of `5xx` responses. 
+| `http_response_body_bytes_total` | Total of the responses body size. +| `http_response_body_bytes` | Histogram of the responses body size. +| `http_round_trip_time` | Histogram of the round trip time. |======= ==== Developer tools diff --git a/x-pack/filebeat/input/awss3/config.go b/x-pack/filebeat/input/awss3/config.go index bf29b641f6d4..b85c3f3871c9 100644 --- a/x-pack/filebeat/input/awss3/config.go +++ b/x-pack/filebeat/input/awss3/config.go @@ -9,6 +9,10 @@ import ( "fmt" "time" + awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/dustin/go-humanize" "github.com/elastic/beats/v7/libbeat/common/cfgtype" @@ -222,3 +226,59 @@ func (rc *readerConfig) InitDefaults() { rc.MaxBytes = 10 * humanize.MiByte rc.LineTerminator = readfile.AutoLineTerminator } + +func (c config) getBucketName() string { + if c.NonAWSBucketName != "" { + return c.NonAWSBucketName + } + if c.BucketARN != "" { + return getBucketNameFromARN(c.BucketARN) + } + return "" +} + +func (c config) getBucketARN() string { + if c.NonAWSBucketName != "" { + return c.NonAWSBucketName + } + if c.BucketARN != "" { + return c.BucketARN + } + return "" +} + +// An AWS SDK callback to apply the input configuration's settings to an S3 +// options struct. +// Should be provided as a parameter to s3.NewFromConfig. 
+func (c config) s3ConfigModifier(o *s3.Options) { + if c.NonAWSBucketName != "" { + o.EndpointResolver = nonAWSBucketResolver{endpoint: c.AWSConfig.Endpoint} + } + + if c.AWSConfig.FIPSEnabled { + o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled + } + o.UsePathStyle = c.PathStyle + + o.Retryer = retry.NewStandard(func(so *retry.StandardOptions) { + so.MaxAttempts = 5 + // Recover quickly when requests start working again + so.NoRetryIncrement = 100 + }) +} + +// An AWS SDK callback to apply the input configuration's settings to an SQS +// options struct. +// Should be provided as a parameter to sqs.NewFromConfig. +func (c config) sqsConfigModifier(o *sqs.Options) { + if c.AWSConfig.FIPSEnabled { + o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled + } +} + +func (c config) getFileSelectors() []fileSelectorConfig { + if len(c.FileSelectors) > 0 { + return c.FileSelectors + } + return []fileSelectorConfig{{ReaderConfig: c.ReaderConfig}} +} diff --git a/x-pack/filebeat/input/awss3/input.go b/x-pack/filebeat/input/awss3/input.go index a8020eb50b82..f0fa31379746 100644 --- a/x-pack/filebeat/input/awss3/input.go +++ b/x-pack/filebeat/input/awss3/input.go @@ -5,33 +5,19 @@ package awss3 import ( - "context" - "errors" "fmt" - "net/url" - "strings" - "time" awssdk "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/sqs" - "github.com/aws/smithy-go" "github.com/elastic/beats/v7/filebeat/beater" v2 "github.com/elastic/beats/v7/filebeat/input/v2" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/feature" awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" conf "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-concert/unison" ) -const ( - inputName = "aws-s3" - sqsAccessDeniedErrorCode = "AccessDeniedException" -) +const 
inputName = "aws-s3" func Plugin(store beater.StateStore) v2.Plugin { return v2.Plugin{ @@ -57,19 +43,10 @@ func (im *s3InputManager) Create(cfg *conf.C) (v2.Input, error) { return nil, err } - return newInput(config, im.store) -} - -// s3Input is a input for reading logs from S3 when triggered by an SQS message. -type s3Input struct { - config config - awsConfig awssdk.Config - store beater.StateStore - metrics *inputMetrics -} - -func newInput(config config, store beater.StateStore) (*s3Input, error) { awsConfig, err := awscommon.InitializeAWSConfig(config.AWSConfig) + if err != nil { + return nil, fmt.Errorf("initializing AWS config: %w", err) + } if config.AWSConfig.Endpoint != "" { // Add a custom endpointResolver to the awsConfig so that all the requests are routed to this endpoint @@ -82,401 +59,15 @@ func newInput(config config, store beater.StateStore) (*s3Input, error) { }) } - if err != nil { - return nil, fmt.Errorf("failed to initialize AWS credentials: %w", err) - } - - return &s3Input{ - config: config, - awsConfig: awsConfig, - store: store, - }, nil -} - -func (in *s3Input) Name() string { return inputName } - -func (in *s3Input) Test(ctx v2.TestContext) error { - return nil -} - -func (in *s3Input) Run(inputContext v2.Context, pipeline beat.Pipeline) error { - ctx := v2.GoContextFromCanceler(inputContext.Cancelation) - - if in.config.QueueURL != "" { - return in.runQueueReader(ctx, inputContext, pipeline) - } - - if in.config.BucketARN != "" || in.config.NonAWSBucketName != "" { - return in.runS3Poller(ctx, inputContext, pipeline) - } - - return nil -} - -func (in *s3Input) runQueueReader( - ctx context.Context, - inputContext v2.Context, - pipeline beat.Pipeline, -) error { - // Set awsConfig.Region based on the config and queue URL - region, err := chooseRegion(inputContext.Logger, in.config) - if err != nil { - return err - } - in.awsConfig.Region = region - - // Create SQS receiver and S3 notification processor. 
- receiver, err := in.createSQSReceiver(inputContext, pipeline) - if err != nil { - return fmt.Errorf("failed to initialize sqs receiver: %w", err) - } - defer receiver.metrics.Close() - - // Poll metrics periodically in the background - go pollSqsWaitingMetric(ctx, receiver) - - return receiver.Receive(ctx) -} - -func (in *s3Input) runS3Poller( - ctx context.Context, - inputContext v2.Context, - pipeline beat.Pipeline, -) error { - // Create client for publishing events and receive notification of their ACKs. - client, err := pipeline.ConnectWith(beat.ClientConfig{ - EventListener: awscommon.NewEventACKHandler(), - Processing: beat.ProcessingConfig{ - // This input only produces events with basic types so normalization - // is not required. - EventNormalization: boolPtr(false), - }, - }) - if err != nil { - return fmt.Errorf("failed to create pipeline client: %w", err) - } - defer client.Close() - - // Connect to the registry and create our states lookup - persistentStore, err := in.store.Access() - if err != nil { - return fmt.Errorf("can not access persistent store: %w", err) - } - defer persistentStore.Close() - - states, err := newStates(inputContext, persistentStore) - if err != nil { - return fmt.Errorf("can not start persistent store: %w", err) - } - - // Create S3 receiver and S3 notification processor. 
- poller, err := in.createS3Poller(inputContext, ctx, client, states) - if err != nil { - return fmt.Errorf("failed to initialize s3 poller: %w", err) - } - defer poller.metrics.Close() - - return poller.Poll(ctx) -} - -func (in *s3Input) createSQSReceiver(ctx v2.Context, pipeline beat.Pipeline) (*sqsReader, error) { - sqsAPI := &awsSQSAPI{ - client: sqs.NewFromConfig(in.awsConfig, func(o *sqs.Options) { - if in.config.AWSConfig.FIPSEnabled { - o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled - } - }), - queueURL: in.config.QueueURL, - apiTimeout: in.config.APITimeout, - visibilityTimeout: in.config.VisibilityTimeout, - longPollWaitTime: in.config.SQSWaitTime, - } - - s3API := &awsS3API{ - client: s3.NewFromConfig(in.awsConfig, func(o *s3.Options) { - if in.config.AWSConfig.FIPSEnabled { - o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled - } - o.UsePathStyle = in.config.PathStyle - }), - } - - log := ctx.Logger.With("queue_url", in.config.QueueURL) - log.Infof("AWS api_timeout is set to %v.", in.config.APITimeout) - log.Infof("AWS region is set to %v.", in.awsConfig.Region) - log.Infof("AWS SQS visibility_timeout is set to %v.", in.config.VisibilityTimeout) - log.Infof("AWS SQS max_number_of_messages is set to %v.", in.config.MaxNumberOfMessages) - - if in.config.BackupConfig.GetBucketName() != "" { - log.Warnf("You have the backup_to_bucket functionality activated with SQS. 
Please make sure to set appropriate destination buckets" + - "or prefixes to avoid an infinite loop.") - } - - fileSelectors := in.config.FileSelectors - if len(in.config.FileSelectors) == 0 { - fileSelectors = []fileSelectorConfig{{ReaderConfig: in.config.ReaderConfig}} - } - script, err := newScriptFromConfig(log.Named("sqs_script"), in.config.SQSScript) - if err != nil { - return nil, err - } - in.metrics = newInputMetrics(ctx.ID, nil, in.config.MaxNumberOfMessages) - - s3EventHandlerFactory := newS3ObjectProcessorFactory(log.Named("s3"), in.metrics, s3API, fileSelectors, in.config.BackupConfig, in.config.MaxNumberOfMessages) - - sqsMessageHandler := newSQSS3EventProcessor(log.Named("sqs_s3_event"), in.metrics, sqsAPI, script, in.config.VisibilityTimeout, in.config.SQSMaxReceiveCount, pipeline, s3EventHandlerFactory, in.config.MaxNumberOfMessages) - - sqsReader := newSQSReader(log.Named("sqs"), in.metrics, sqsAPI, in.config.MaxNumberOfMessages, sqsMessageHandler) - - return sqsReader, nil -} - -type nonAWSBucketResolver struct { - endpoint string -} - -func (n nonAWSBucketResolver) ResolveEndpoint(region string, options s3.EndpointResolverOptions) (awssdk.Endpoint, error) { - return awssdk.Endpoint{URL: n.endpoint, SigningRegion: region, HostnameImmutable: true, Source: awssdk.EndpointSourceCustom}, nil -} - -func (in *s3Input) createS3Poller(ctx v2.Context, cancelCtx context.Context, client beat.Client, states *states) (*s3Poller, error) { - var bucketName string - var bucketID string - if in.config.NonAWSBucketName != "" { - bucketName = in.config.NonAWSBucketName - bucketID = bucketName - } else if in.config.BucketARN != "" { - bucketName = getBucketNameFromARN(in.config.BucketARN) - bucketID = in.config.BucketARN - } - - s3Client := s3.NewFromConfig(in.awsConfig, func(o *s3.Options) { - if in.config.NonAWSBucketName != "" { - o.EndpointResolver = nonAWSBucketResolver{endpoint: in.config.AWSConfig.Endpoint} - } - - if in.config.AWSConfig.FIPSEnabled { - 
o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled - } - o.UsePathStyle = in.config.PathStyle - - o.Retryer = retry.NewStandard(func(so *retry.StandardOptions) { - so.MaxAttempts = 5 - // Recover quickly when requests start working again - so.NoRetryIncrement = 100 - }) - }) - regionName, err := getRegionForBucket(cancelCtx, s3Client, bucketName) - if err != nil { - return nil, fmt.Errorf("failed to get AWS region for bucket: %w", err) - } - - originalAwsConfigRegion := in.awsConfig.Region - - in.awsConfig.Region = regionName - - if regionName != originalAwsConfigRegion { - s3Client = s3.NewFromConfig(in.awsConfig, func(o *s3.Options) { - if in.config.NonAWSBucketName != "" { - o.EndpointResolver = nonAWSBucketResolver{endpoint: in.config.AWSConfig.Endpoint} - } - - if in.config.AWSConfig.FIPSEnabled { - o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled - } - o.UsePathStyle = in.config.PathStyle - }) - } - - s3API := &awsS3API{ - client: s3Client, - } - - log := ctx.Logger.With("bucket", bucketID) - log.Infof("number_of_workers is set to %v.", in.config.NumberOfWorkers) - log.Infof("bucket_list_interval is set to %v.", in.config.BucketListInterval) - log.Infof("bucket_list_prefix is set to %v.", in.config.BucketListPrefix) - log.Infof("AWS region is set to %v.", in.awsConfig.Region) - - fileSelectors := in.config.FileSelectors - if len(in.config.FileSelectors) == 0 { - fileSelectors = []fileSelectorConfig{{ReaderConfig: in.config.ReaderConfig}} - } - in.metrics = newInputMetrics(ctx.ID, nil, in.config.MaxNumberOfMessages) - s3EventHandlerFactory := newS3ObjectProcessorFactory(log.Named("s3"), in.metrics, s3API, fileSelectors, in.config.BackupConfig, in.config.MaxNumberOfMessages) - s3Poller := newS3Poller(log.Named("s3_poller"), - in.metrics, - s3API, - client, - s3EventHandlerFactory, - states, - bucketID, - in.config.BucketListPrefix, - in.awsConfig.Region, - getProviderFromDomain(in.config.AWSConfig.Endpoint, 
in.config.ProviderOverride), - in.config.NumberOfWorkers, - in.config.BucketListInterval) - - return s3Poller, nil -} - -var errBadQueueURL = errors.New("QueueURL is not in format: https://sqs.{REGION_ENDPOINT}.{ENDPOINT}/{ACCOUNT_NUMBER}/{QUEUE_NAME} or https://{VPC_ENDPOINT}.sqs.{REGION_ENDPOINT}.vpce.{ENDPOINT}/{ACCOUNT_NUMBER}/{QUEUE_NAME}") - -func chooseRegion(log *logp.Logger, config config) (string, error) { - urlRegion := getRegionFromQueueURL(config.QueueURL, config.AWSConfig.Endpoint) - if config.RegionName != "" { - // If a region is configured, that takes precedence over the URL. - if log != nil && config.RegionName != urlRegion { - log.Warnf("configured region disagrees with queue_url region (%q != %q): using %q", config.RegionName, urlRegion, config.RegionName) - } - return config.RegionName, nil - } - if urlRegion != "" { - // If no region is configured, fall back on the URL. - return urlRegion, nil - } - // If we can't get the region from the config or the URL, report an error. - return "", fmt.Errorf("failed to get AWS region from queue_url: %w", errBadQueueURL) -} - -// getRegionFromQueueURL returns the region from standard queue URLs, or the -// empty string if it couldn't be determined. 
-func getRegionFromQueueURL(queueURL, endpoint string) string { - // get region from queueURL - // Example for sqs queue: https://sqs.us-east-1.amazonaws.com/12345678912/test-s3-logs - // Example for vpce: https://vpce-test.sqs.us-east-1.vpce.amazonaws.com/12345678912/sqs-queue - u, err := url.Parse(queueURL) - if err != nil { - return "" - } - - // check for sqs queue url - host := strings.SplitN(u.Host, ".", 3) - if len(host) == 3 && host[0] == "sqs" { - if host[2] == endpoint || (endpoint == "" && strings.HasPrefix(host[2], "amazonaws.")) { - return host[1] - } - } - - // check for vpce url - host = strings.SplitN(u.Host, ".", 5) - if len(host) == 5 && host[1] == "sqs" { - if host[4] == endpoint || (endpoint == "" && strings.HasPrefix(host[4], "amazonaws.")) { - return host[2] - } - } - - return "" -} - -func getRegionForBucket(ctx context.Context, s3Client *s3.Client, bucketName string) (string, error) { - getBucketLocationOutput, err := s3Client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{ - Bucket: awssdk.String(bucketName), - }) - - if err != nil { - return "", err + if config.QueueURL != "" { + return newSQSReaderInput(config, awsConfig), nil } - // Region us-east-1 have a LocationConstraint of null. 
- if len(getBucketLocationOutput.LocationConstraint) == 0 { - return "us-east-1", nil - } - - return string(getBucketLocationOutput.LocationConstraint), nil -} - -func getBucketNameFromARN(bucketARN string) string { - bucketMetadata := strings.Split(bucketARN, ":") - bucketName := bucketMetadata[len(bucketMetadata)-1] - return bucketName -} - -func getProviderFromDomain(endpoint string, ProviderOverride string) string { - if ProviderOverride != "" { - return ProviderOverride - } - if endpoint == "" { - return "aws" - } - // List of popular S3 SaaS providers - providers := map[string]string{ - "amazonaws.com": "aws", - "c2s.sgov.gov": "aws", - "c2s.ic.gov": "aws", - "amazonaws.com.cn": "aws", - "backblazeb2.com": "backblaze", - "cloudflarestorage.com": "cloudflare", - "wasabisys.com": "wasabi", - "digitaloceanspaces.com": "digitalocean", - "dream.io": "dreamhost", - "scw.cloud": "scaleway", - "googleapis.com": "gcp", - "cloud.it": "arubacloud", - "linodeobjects.com": "linode", - "vultrobjects.com": "vultr", - "appdomain.cloud": "ibm", - "aliyuncs.com": "alibaba", - "oraclecloud.com": "oracle", - "exo.io": "exoscale", - "upcloudobjects.com": "upcloud", - "ilandcloud.com": "iland", - "zadarazios.com": "zadara", - } - - parsedEndpoint, _ := url.Parse(endpoint) - for key, provider := range providers { - // support endpoint with and without scheme (http(s)://abc.xyz, abc.xyz) - constraint := parsedEndpoint.Hostname() - if len(parsedEndpoint.Scheme) == 0 { - constraint = parsedEndpoint.Path - } - if strings.HasSuffix(constraint, key) { - return provider - } - } - return "unknown" -} - -func pollSqsWaitingMetric(ctx context.Context, receiver *sqsReader) { - // Run GetApproximateMessageCount before start of timer to set initial count for sqs waiting metric - // This is to avoid misleading values in metric when sqs messages are processed before the ticker channel kicks in - if shouldReturn := updateMessageCount(receiver, ctx); shouldReturn { - return - } - - t := 
time.NewTicker(time.Minute) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - if shouldReturn := updateMessageCount(receiver, ctx); shouldReturn { - return - } - } - } -} - -// updateMessageCount runs GetApproximateMessageCount for the given context and updates the receiver metric with the count returning false on no error -// If there is an error, the metric is reinitialized to -1 and true is returned -func updateMessageCount(receiver *sqsReader, ctx context.Context) bool { - count, err := receiver.GetApproximateMessageCount(ctx) - - var apiError smithy.APIError - if errors.As(err, &apiError) { - switch apiError.ErrorCode() { - case sqsAccessDeniedErrorCode: - // stop polling if auth error is encountered - // Set it back to -1 because there is a permission error - receiver.metrics.sqsMessagesWaiting.Set(int64(-1)) - return true - } + if config.BucketARN != "" || config.NonAWSBucketName != "" { + return newS3PollerInput(config, awsConfig, im.store) } - receiver.metrics.sqsMessagesWaiting.Set(int64(count)) - return false + return nil, fmt.Errorf("configuration has no SQS queue URL and no S3 bucket ARN") } // boolPtr returns a pointer to b. 
diff --git a/x-pack/filebeat/input/awss3/input_benchmark_test.go b/x-pack/filebeat/input/awss3/input_benchmark_test.go index 5d22d1411687..09b7c8bd9d26 100644 --- a/x-pack/filebeat/input/awss3/input_benchmark_test.go +++ b/x-pack/filebeat/input/awss3/input_benchmark_test.go @@ -16,9 +16,7 @@ import ( "time" "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/statestore" - "github.com/elastic/beats/v7/libbeat/statestore/storetest" + "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" @@ -210,23 +208,24 @@ file_selectors: func benchmarkInputSQS(t *testing.T, maxMessagesInflight int) testing.BenchmarkResult { return testing.Benchmark(func(b *testing.B) { - log := logp.NewLogger(inputName) - metricRegistry := monitoring.NewRegistry() - metrics := newInputMetrics("test_id", metricRegistry, maxMessagesInflight) - sqsAPI := newConstantSQS() - s3API := newConstantS3(t) + var err error pipeline := &fakePipeline{} - conf := makeBenchmarkConfig(t) - s3EventHandlerFactory := newS3ObjectProcessorFactory(log.Named("s3"), metrics, s3API, conf.FileSelectors, backupConfig{}, maxMessagesInflight) - sqsMessageHandler := newSQSS3EventProcessor(log.Named("sqs_s3_event"), metrics, sqsAPI, nil, time.Minute, 5, pipeline, s3EventHandlerFactory, maxMessagesInflight) - sqsReader := newSQSReader(log.Named("sqs"), metrics, sqsAPI, maxMessagesInflight, sqsMessageHandler) + conf := makeBenchmarkConfig(t) + conf.MaxNumberOfMessages = maxMessagesInflight + sqsReader := newSQSReaderInput(conf, aws.Config{}) + sqsReader.log = log.Named("sqs") + sqsReader.metrics = newInputMetrics("test_id", monitoring.NewRegistry(), maxMessagesInflight) + sqsReader.sqs = newConstantSQS() + sqsReader.s3 = newConstantS3(t) + sqsReader.msgHandler, err = sqsReader.createEventProcessor(pipeline) + require.NoError(t, err, "createEventProcessor must succeed") ctx, cancel := context.WithCancel(context.Background()) b.Cleanup(cancel) go func() { - for 
metrics.sqsMessagesReceivedTotal.Get() < uint64(b.N) { + for sqsReader.metrics.sqsMessagesReceivedTotal.Get() < uint64(b.N) { time.Sleep(5 * time.Millisecond) } cancel() @@ -234,25 +233,21 @@ func benchmarkInputSQS(t *testing.T, maxMessagesInflight int) testing.BenchmarkR b.ResetTimer() start := time.Now() - if err := sqsReader.Receive(ctx); err != nil { - if !errors.Is(err, context.DeadlineExceeded) { - t.Fatal(err) - } - } + sqsReader.run(ctx) b.StopTimer() elapsed := time.Since(start) b.ReportMetric(float64(maxMessagesInflight), "max_messages_inflight") b.ReportMetric(elapsed.Seconds(), "sec") - b.ReportMetric(float64(metrics.s3EventsCreatedTotal.Get()), "events") - b.ReportMetric(float64(metrics.s3EventsCreatedTotal.Get())/elapsed.Seconds(), "events_per_sec") + b.ReportMetric(float64(sqsReader.metrics.s3EventsCreatedTotal.Get()), "events") + b.ReportMetric(float64(sqsReader.metrics.s3EventsCreatedTotal.Get())/elapsed.Seconds(), "events_per_sec") - b.ReportMetric(float64(metrics.s3BytesProcessedTotal.Get()), "s3_bytes") - b.ReportMetric(float64(metrics.s3BytesProcessedTotal.Get())/elapsed.Seconds(), "s3_bytes_per_sec") + b.ReportMetric(float64(sqsReader.metrics.s3BytesProcessedTotal.Get()), "s3_bytes") + b.ReportMetric(float64(sqsReader.metrics.s3BytesProcessedTotal.Get())/elapsed.Seconds(), "s3_bytes_per_sec") - b.ReportMetric(float64(metrics.sqsMessagesDeletedTotal.Get()), "sqs_messages") - b.ReportMetric(float64(metrics.sqsMessagesDeletedTotal.Get())/elapsed.Seconds(), "sqs_messages_per_sec") + b.ReportMetric(float64(sqsReader.metrics.sqsMessagesDeletedTotal.Get()), "sqs_messages") + b.ReportMetric(float64(sqsReader.metrics.sqsMessagesDeletedTotal.Get())/elapsed.Seconds(), "sqs_messages_per_sec") }) } @@ -314,6 +309,7 @@ func benchmarkInputS3(t *testing.T, numberOfWorkers int) testing.BenchmarkResult }() config := makeBenchmarkConfig(t) + config.NumberOfWorkers = numberOfWorkers b.ResetTimer() start := time.Now() @@ -333,27 +329,28 @@ func benchmarkInputS3(t 
*testing.T, numberOfWorkers int) testing.BenchmarkResult wg.Add(1) go func(i int, wg *sync.WaitGroup) { defer wg.Done() - listPrefix := fmt.Sprintf("list_prefix_%d", i) + curConfig := config + curConfig.BucketListPrefix = fmt.Sprintf("list_prefix_%d", i) s3API := newConstantS3(t) - s3API.pagerConstant = newS3PagerConstant(listPrefix) - storeReg := statestore.NewRegistry(storetest.NewMemoryStoreBackend()) - store, err := storeReg.Get("test") - if err != nil { - errChan <- fmt.Errorf("failed to access store: %w", err) - return - } + s3API.pagerConstant = newS3PagerConstant(curConfig.BucketListPrefix) + store := openTestStatestore() - states, err := newStates(inputCtx, store) + states, err := newStates(nil, store) assert.NoError(t, err, "states creation should succeed") - s3EventHandlerFactory := newS3ObjectProcessorFactory(log.Named("s3"), metrics, s3API, config.FileSelectors, backupConfig{}, numberOfWorkers) - s3Poller := newS3Poller(logp.NewLogger(inputName), metrics, s3API, client, s3EventHandlerFactory, states, "bucket", listPrefix, "region", "provider", numberOfWorkers, time.Second) - - if err := s3Poller.Poll(ctx); err != nil { - if !errors.Is(err, context.DeadlineExceeded) { - errChan <- err - } + s3EventHandlerFactory := newS3ObjectProcessorFactory(log.Named("s3"), metrics, s3API, config.FileSelectors, backupConfig{}) + s3Poller := &s3PollerInput{ + log: logp.NewLogger(inputName), + config: config, + metrics: metrics, + s3: s3API, + client: client, + s3ObjectHandler: s3EventHandlerFactory, + states: states, + provider: "provider", } + + s3Poller.run(ctx) }(i, wg) } diff --git a/x-pack/filebeat/input/awss3/input_test.go b/x-pack/filebeat/input/awss3/input_test.go index c76e939424f7..83015c1661be 100644 --- a/x-pack/filebeat/input/awss3/input_test.go +++ b/x-pack/filebeat/input/awss3/input_test.go @@ -5,11 +5,16 @@ package awss3 import ( + "context" "errors" "testing" - aws "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + awssdk 
"github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" + + v2 "github.com/elastic/beats/v7/filebeat/input/v2" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + "github.com/elastic/elastic-agent-libs/logp" ) func TestGetProviderFromDomain(t *testing.T) { @@ -51,7 +56,7 @@ func TestGetProviderFromDomain(t *testing.T) { } } -func TestGetRegionFromQueueURL(t *testing.T) { +func TestRegionSelection(t *testing.T) { tests := []struct { name string queueURL string @@ -129,9 +134,18 @@ func TestGetRegionFromQueueURL(t *testing.T) { config := config{ QueueURL: test.queueURL, RegionName: test.regionName, - AWSConfig: aws.ConfigAWS{Endpoint: test.endpoint}, + AWSConfig: awscommon.ConfigAWS{Endpoint: test.endpoint}, + } + in := newSQSReaderInput(config, awssdk.Config{}) + inputCtx := v2.Context{ + Logger: logp.NewLogger("awss3_test"), + ID: "test_id", } - got, err := chooseRegion(nil, config) + + // Run setup and verify that it put the correct region in awsConfig.Region + err := in.setup(inputCtx, &fakePipeline{}) + in.cleanup() + got := in.awsConfig.Region // The region passed into the AWS API if !errors.Is(err, test.wantErr) { t.Errorf("unexpected error: got:%v want:%v", err, test.wantErr) } @@ -141,3 +155,12 @@ func TestGetRegionFromQueueURL(t *testing.T) { }) } } + +func newV2Context() (v2.Context, func()) { + ctx, cancel := context.WithCancel(context.Background()) + return v2.Context{ + Logger: logp.NewLogger("awss3_test"), + ID: "test_id", + Cancelation: ctx, + }, cancel +} diff --git a/x-pack/filebeat/input/awss3/s3.go b/x-pack/filebeat/input/awss3/s3.go index 8909f78bb39d..eb8e19c2cf92 100644 --- a/x-pack/filebeat/input/awss3/s3.go +++ b/x-pack/filebeat/input/awss3/s3.go @@ -6,212 +6,118 @@ package awss3 import ( "context" - "errors" "fmt" - "sync" - "time" + "net/url" + "strings" - "github.com/aws/aws-sdk-go-v2/aws/ratelimit" + awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" 
"github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/common/backoff" awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/go-concert/timed" ) -// var instead of const so it can be reduced during unit tests (instead of waiting -// through 10 minutes of retry backoff) -var readerLoopMaxCircuitBreaker = 10 - -type s3ObjectPayload struct { - s3ObjectHandler s3ObjectHandler - objectState state -} - -type s3Poller struct { - numberOfWorkers int - bucket string - listPrefix string - region string - provider string - bucketPollInterval time.Duration - s3 s3API - log *logp.Logger - metrics *inputMetrics - client beat.Client - s3ObjectHandler s3ObjectHandlerFactory - states *states - workersProcessingMap *sync.Map -} - -func newS3Poller(log *logp.Logger, - metrics *inputMetrics, - s3 s3API, - client beat.Client, - s3ObjectHandler s3ObjectHandlerFactory, - states *states, - bucket string, - listPrefix string, - awsRegion string, - provider string, - numberOfWorkers int, - bucketPollInterval time.Duration, -) *s3Poller { - if metrics == nil { - // Metrics are optional. Initialize a stub. - metrics = newInputMetrics("", nil, 0) +func createS3API(ctx context.Context, config config, awsConfig awssdk.Config) (*awsS3API, error) { + s3Client := s3.NewFromConfig(awsConfig, config.s3ConfigModifier) + regionName, err := getRegionForBucket(ctx, s3Client, config.getBucketName()) + if err != nil { + return nil, fmt.Errorf("failed to get AWS region for bucket: %w", err) } - return &s3Poller{ - numberOfWorkers: numberOfWorkers, - bucket: bucket, - listPrefix: listPrefix, - region: awsRegion, - provider: provider, - bucketPollInterval: bucketPollInterval, - s3: s3, - log: log, - metrics: metrics, - client: client, - s3ObjectHandler: s3ObjectHandler, - states: states, - workersProcessingMap: new(sync.Map), + // Can this really happen? 
+ if regionName != awsConfig.Region { + awsConfig.Region = regionName + s3Client = s3.NewFromConfig(awsConfig, config.s3ConfigModifier) } -} - -func (p *s3Poller) createS3ObjectProcessor(ctx context.Context, state state) s3ObjectHandler { - event := s3EventV2{} - event.AWSRegion = p.region - event.Provider = p.provider - event.S3.Bucket.Name = state.Bucket - event.S3.Bucket.ARN = p.bucket - event.S3.Object.Key = state.Key - acker := awscommon.NewEventACKTracker(ctx) - - return p.s3ObjectHandler.Create(ctx, p.log, p.client, acker, event) + return &awsS3API{ + client: s3Client, + }, nil } -func (p *s3Poller) workerLoop(ctx context.Context, s3ObjectPayloadChan <-chan *s3ObjectPayload) { - rateLimitWaiter := backoff.NewEqualJitterBackoff(ctx.Done(), 1, 120) - - for s3ObjectPayload := range s3ObjectPayloadChan { - objHandler := s3ObjectPayload.s3ObjectHandler - state := s3ObjectPayload.objectState +func createPipelineClient(pipeline beat.Pipeline) (beat.Client, error) { + return pipeline.ConnectWith(beat.ClientConfig{ + EventListener: awscommon.NewEventACKHandler(), + Processing: beat.ProcessingConfig{ + // This input only produces events with basic types so normalization + // is not required. + EventNormalization: boolPtr(false), + }, + }) +} - // Process S3 object (download, parse, create events). - err := objHandler.ProcessS3Object() - if errors.Is(err, errS3DownloadFailed) { - // Download errors are ephemeral. Add a backoff delay, then skip to the - // next iteration so we don't mark the object as permanently failed. - rateLimitWaiter.Wait() - continue - } - // Reset the rate limit delay on results that aren't download errors. - rateLimitWaiter.Reset() +func getRegionForBucket(ctx context.Context, s3Client *s3.Client, bucketName string) (string, error) { + getBucketLocationOutput, err := s3Client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{ + Bucket: awssdk.String(bucketName), + }) - // Wait for downloaded objects to be ACKed. 
- objHandler.Wait() + if err != nil { + return "", err + } - if err != nil { - p.log.Errorf("failed processing S3 event for object key %q in bucket %q: %v", - state.Key, state.Bucket, err.Error()) + // Region us-east-1 have a LocationConstraint of null. + if len(getBucketLocationOutput.LocationConstraint) == 0 { + return "us-east-1", nil + } - // Non-retryable error. - state.Failed = true - } else { - state.Stored = true - } + return string(getBucketLocationOutput.LocationConstraint), nil +} - // Persist the result - p.states.AddState(state) +func getBucketNameFromARN(bucketARN string) string { + bucketMetadata := strings.Split(bucketARN, ":") + bucketName := bucketMetadata[len(bucketMetadata)-1] + return bucketName +} - // Metrics - p.metrics.s3ObjectsAckedTotal.Inc() +func getProviderFromDomain(endpoint string, ProviderOverride string) string { + if ProviderOverride != "" { + return ProviderOverride + } + if endpoint == "" { + return "aws" + } + // List of popular S3 SaaS providers + providers := map[string]string{ + "amazonaws.com": "aws", + "c2s.sgov.gov": "aws", + "c2s.ic.gov": "aws", + "amazonaws.com.cn": "aws", + "backblazeb2.com": "backblaze", + "cloudflarestorage.com": "cloudflare", + "wasabisys.com": "wasabi", + "digitaloceanspaces.com": "digitalocean", + "dream.io": "dreamhost", + "scw.cloud": "scaleway", + "googleapis.com": "gcp", + "cloud.it": "arubacloud", + "linodeobjects.com": "linode", + "vultrobjects.com": "vultr", + "appdomain.cloud": "ibm", + "aliyuncs.com": "alibaba", + "oraclecloud.com": "oracle", + "exo.io": "exoscale", + "upcloudobjects.com": "upcloud", + "ilandcloud.com": "iland", + "zadarazios.com": "zadara", } -} -func (p *s3Poller) readerLoop(ctx context.Context, s3ObjectPayloadChan chan<- *s3ObjectPayload) { - defer close(s3ObjectPayloadChan) - - bucketName := getBucketNameFromARN(p.bucket) - - errorBackoff := backoff.NewEqualJitterBackoff(ctx.Done(), 1, 120) - circuitBreaker := 0 - paginator := p.s3.ListObjectsPaginator(bucketName, 
p.listPrefix) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if err != nil { - p.log.Warnw("Error when paginating listing.", "error", err) - // QuotaExceededError is client-side rate limiting in the AWS sdk, - // don't include it in the circuit breaker count - if !errors.As(err, &ratelimit.QuotaExceededError{}) { - circuitBreaker++ - if circuitBreaker >= readerLoopMaxCircuitBreaker { - p.log.Warnw(fmt.Sprintf("%d consecutive error when paginating listing, breaking the circuit.", circuitBreaker), "error", err) - break - } - } - // add a backoff delay and try again - errorBackoff.Wait() - continue + parsedEndpoint, _ := url.Parse(endpoint) + for key, provider := range providers { + // support endpoint with and without scheme (http(s)://abc.xyz, abc.xyz) + constraint := parsedEndpoint.Hostname() + if len(parsedEndpoint.Scheme) == 0 { + constraint = parsedEndpoint.Path } - // Reset the circuit breaker and the error backoff if a read is successful - circuitBreaker = 0 - errorBackoff.Reset() - - totListedObjects := len(page.Contents) - - // Metrics - p.metrics.s3ObjectsListedTotal.Add(uint64(totListedObjects)) - for _, object := range page.Contents { - state := newState(bucketName, *object.Key, *object.ETag, *object.LastModified) - if p.states.IsProcessed(state) { - p.log.Debugw("skipping state.", "state", state) - continue - } - - s3Processor := p.createS3ObjectProcessor(ctx, state) - if s3Processor == nil { - p.log.Debugw("empty s3 processor.", "state", state) - continue - } - - s3ObjectPayloadChan <- &s3ObjectPayload{ - s3ObjectHandler: s3Processor, - objectState: state, - } - - p.metrics.s3ObjectsProcessedTotal.Inc() + if strings.HasSuffix(constraint, key) { + return provider } } + return "unknown" } -func (p *s3Poller) Poll(ctx context.Context) error { - for ctx.Err() == nil { - var workerWg sync.WaitGroup - workChan := make(chan *s3ObjectPayload) - - // Start the worker goroutines to listen on the work channel - for i := 0; i < 
p.numberOfWorkers; i++ { - workerWg.Add(1) - go func() { - defer workerWg.Done() - p.workerLoop(ctx, workChan) - }() - } - - // Start reading data and wait for its processing to be done - p.readerLoop(ctx, workChan) - workerWg.Wait() - - _ = timed.Wait(ctx, p.bucketPollInterval) - } +type nonAWSBucketResolver struct { + endpoint string +} - if errors.Is(ctx.Err(), context.Canceled) { - // A canceled context is a normal shutdown. - return nil - } - return ctx.Err() +func (n nonAWSBucketResolver) ResolveEndpoint(region string, options s3.EndpointResolverOptions) (awssdk.Endpoint, error) { + return awssdk.Endpoint{URL: n.endpoint, SigningRegion: region, HostnameImmutable: true, Source: awssdk.EndpointSourceCustom}, nil } diff --git a/x-pack/filebeat/input/awss3/s3_input.go b/x-pack/filebeat/input/awss3/s3_input.go new file mode 100644 index 000000000000..999b27da534a --- /dev/null +++ b/x-pack/filebeat/input/awss3/s3_input.go @@ -0,0 +1,246 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package awss3 + +import ( + "context" + "errors" + "fmt" + "sync" + + awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/ratelimit" + + "github.com/elastic/beats/v7/filebeat/beater" + v2 "github.com/elastic/beats/v7/filebeat/input/v2" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common/backoff" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/go-concert/timed" +) + +// var instead of const so it can be reduced during unit tests (instead of waiting +// through 10 minutes of retry backoff) +var readerLoopMaxCircuitBreaker = 10 + +type s3PollerInput struct { + log *logp.Logger + config config + awsConfig awssdk.Config + store beater.StateStore + provider string + s3 s3API + metrics *inputMetrics + client beat.Client + s3ObjectHandler s3ObjectHandlerFactory + states *states +} + +// s3FetchTask contains metadata for one S3 object that a worker should fetch. +type s3FetchTask struct { + s3ObjectHandler s3ObjectHandler + objectState state +} + +func newS3PollerInput( + config config, + awsConfig awssdk.Config, + store beater.StateStore, +) (v2.Input, error) { + + return &s3PollerInput{ + config: config, + awsConfig: awsConfig, + store: store, + }, nil +} + +func (in *s3PollerInput) Name() string { return inputName } + +func (in *s3PollerInput) Test(ctx v2.TestContext) error { + return nil +} + +func (in *s3PollerInput) Run( + inputContext v2.Context, + pipeline beat.Pipeline, +) error { + in.log = inputContext.Logger.Named("s3") + var err error + + // Load the persistent S3 polling state. + in.states, err = newStates(in.log, in.store) + if err != nil { + return fmt.Errorf("can not start persistent store: %w", err) + } + defer in.states.Close() + + // Create client for publishing events and receive notification of their ACKs. 
+ in.client, err = createPipelineClient(pipeline) + if err != nil { + return fmt.Errorf("failed to create pipeline client: %w", err) + } + defer in.client.Close() + + ctx := v2.GoContextFromCanceler(inputContext.Cancelation) + in.s3, err = createS3API(ctx, in.config, in.awsConfig) + if err != nil { + return fmt.Errorf("failed to create S3 API: %w", err) + } + + in.metrics = newInputMetrics(inputContext.ID, nil, in.config.MaxNumberOfMessages) + defer in.metrics.Close() + + in.s3ObjectHandler = newS3ObjectProcessorFactory( + in.log, + in.metrics, + in.s3, + in.config.getFileSelectors(), + in.config.BackupConfig) + + in.run(ctx) + + return nil +} + +func (in *s3PollerInput) run(ctx context.Context) { + // Scan the bucket in a loop, delaying by the configured interval each + // iteration. + for ctx.Err() == nil { + in.runPoll(ctx) + _ = timed.Wait(ctx, in.config.BucketListInterval) + } +} + +func (in *s3PollerInput) runPoll(ctx context.Context) { + var workerWg sync.WaitGroup + workChan := make(chan *s3FetchTask) + + // Start the worker goroutines to listen on the work channel + for i := 0; i < in.config.NumberOfWorkers; i++ { + workerWg.Add(1) + go func() { + defer workerWg.Done() + in.workerLoop(ctx, workChan) + }() + } + + // Start reading data and wait for its processing to be done + in.readerLoop(ctx, workChan) + workerWg.Wait() +} + +func (in *s3PollerInput) workerLoop(ctx context.Context, workChan <-chan *s3FetchTask) { + rateLimitWaiter := backoff.NewEqualJitterBackoff(ctx.Done(), 1, 120) + + for s3ObjectPayload := range workChan { + objHandler := s3ObjectPayload.s3ObjectHandler + state := s3ObjectPayload.objectState + + // Process S3 object (download, parse, create events). + err := objHandler.ProcessS3Object() + if errors.Is(err, errS3DownloadFailed) { + // Download errors are ephemeral. Add a backoff delay, then skip to the + // next iteration so we don't mark the object as permanently failed. 
+ rateLimitWaiter.Wait() + continue + } + // Reset the rate limit delay on results that aren't download errors. + rateLimitWaiter.Reset() + + // Wait for downloaded objects to be ACKed. + objHandler.Wait() + + if err != nil { + in.log.Errorf("failed processing S3 event for object key %q in bucket %q: %v", + state.Key, state.Bucket, err.Error()) + + // Non-retryable error. + state.Failed = true + } else { + state.Stored = true + } + + // Persist the result, report any errors + err = in.states.AddState(state) + if err != nil { + in.log.Errorf("saving completed object state: %v", err.Error()) + } + + // Metrics + in.metrics.s3ObjectsAckedTotal.Inc() + } +} + +func (in *s3PollerInput) readerLoop(ctx context.Context, workChan chan<- *s3FetchTask) { + defer close(workChan) + + bucketName := getBucketNameFromARN(in.config.getBucketARN()) + + errorBackoff := backoff.NewEqualJitterBackoff(ctx.Done(), 1, 120) + circuitBreaker := 0 + paginator := in.s3.ListObjectsPaginator(bucketName, in.config.BucketListPrefix) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if err != nil { + in.log.Warnw("Error when paginating listing.", "error", err) + // QuotaExceededError is client-side rate limiting in the AWS sdk, + // don't include it in the circuit breaker count + if !errors.As(err, &ratelimit.QuotaExceededError{}) { + circuitBreaker++ + if circuitBreaker >= readerLoopMaxCircuitBreaker { + in.log.Warnw(fmt.Sprintf("%d consecutive error when paginating listing, breaking the circuit.", circuitBreaker), "error", err) + break + } + } + // add a backoff delay and try again + errorBackoff.Wait() + continue + } + // Reset the circuit breaker and the error backoff if a read is successful + circuitBreaker = 0 + errorBackoff.Reset() + + totListedObjects := len(page.Contents) + + // Metrics + in.metrics.s3ObjectsListedTotal.Add(uint64(totListedObjects)) + for _, object := range page.Contents { + state := newState(bucketName, *object.Key, *object.ETag, 
*object.LastModified) + if in.states.IsProcessed(state) { + in.log.Debugw("skipping state.", "state", state) + continue + } + + s3Processor := in.createS3ObjectProcessor(ctx, state) + if s3Processor == nil { + in.log.Debugw("empty s3 processor.", "state", state) + continue + } + + workChan <- &s3FetchTask{ + s3ObjectHandler: s3Processor, + objectState: state, + } + + in.metrics.s3ObjectsProcessedTotal.Inc() + } + } +} + +func (in *s3PollerInput) createS3ObjectProcessor(ctx context.Context, state state) s3ObjectHandler { + event := s3EventV2{} + event.AWSRegion = in.awsConfig.Region + event.Provider = in.provider + event.S3.Bucket.Name = state.Bucket + event.S3.Bucket.ARN = in.config.getBucketARN() + event.S3.Object.Key = state.Key + + acker := awscommon.NewEventACKTracker(ctx) + + return in.s3ObjectHandler.Create(ctx, in.log, in.client, acker, event) +} diff --git a/x-pack/filebeat/input/awss3/s3_objects.go b/x-pack/filebeat/input/awss3/s3_objects.go index 21dfa2243e7b..05ee572343f1 100644 --- a/x-pack/filebeat/input/awss3/s3_objects.go +++ b/x-pack/filebeat/input/awss3/s3_objects.go @@ -48,7 +48,7 @@ type s3ObjectProcessorFactory struct { // retry backoff until the connection is healthy again. var errS3DownloadFailed = errors.New("S3 download failure") -func newS3ObjectProcessorFactory(log *logp.Logger, metrics *inputMetrics, s3 s3API, sel []fileSelectorConfig, backupConfig backupConfig, maxWorkers int) *s3ObjectProcessorFactory { +func newS3ObjectProcessorFactory(log *logp.Logger, metrics *inputMetrics, s3 s3API, sel []fileSelectorConfig, backupConfig backupConfig) *s3ObjectProcessorFactory { if metrics == nil { // Metrics are optional. Initialize a stub. 
metrics = newInputMetrics("", nil, 0) diff --git a/x-pack/filebeat/input/awss3/s3_objects_test.go b/x-pack/filebeat/input/awss3/s3_objects_test.go index 28e8f4f42a52..d0b4021c7f87 100644 --- a/x-pack/filebeat/input/awss3/s3_objects_test.go +++ b/x-pack/filebeat/input/awss3/s3_objects_test.go @@ -154,7 +154,7 @@ func TestS3ObjectProcessor(t *testing.T) { GetObject(gomock.Any(), gomock.Eq(s3Event.S3.Bucket.Name), gomock.Eq(s3Event.S3.Object.Key)). Return(nil, errFakeConnectivityFailure) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}, 1) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() require.Error(t, err) @@ -176,7 +176,7 @@ func TestS3ObjectProcessor(t *testing.T) { GetObject(gomock.Any(), gomock.Eq(s3Event.S3.Bucket.Name), gomock.Eq(s3Event.S3.Object.Key)). 
Return(nil, nil) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}, 1) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() require.Error(t, err) @@ -203,7 +203,7 @@ func TestS3ObjectProcessor(t *testing.T) { Times(2), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}, 1) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() require.NoError(t, err) @@ -229,7 +229,7 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg, 1) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).FinalizeS3Object() require.NoError(t, err) @@ -259,7 +259,7 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg, 1) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).FinalizeS3Object() require.NoError(t, err) @@ -286,7 +286,7 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg, 1) + s3ObjProc := 
newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).FinalizeS3Object() require.NoError(t, err) @@ -332,7 +332,7 @@ func _testProcessS3Object(t testing.TB, file, contentType string, numEvents int, Times(numEvents), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, selectors, backupConfig{}, 1) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, selectors, backupConfig{}) ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() diff --git a/x-pack/filebeat/input/awss3/s3_test.go b/x-pack/filebeat/input/awss3/s3_test.go index be1d65b796eb..216d9866e73f 100644 --- a/x-pack/filebeat/input/awss3/s3_test.go +++ b/x-pack/filebeat/input/awss3/s3_test.go @@ -15,8 +15,6 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - "github.com/elastic/beats/v7/libbeat/statestore" - "github.com/elastic/beats/v7/libbeat/statestore/storetest" "github.com/elastic/elastic-agent-libs/logp" ) @@ -29,11 +27,7 @@ func TestS3Poller(t *testing.T) { const testTimeout = 1 * time.Second t.Run("Poll success", func(t *testing.T) { - storeReg := statestore.NewRegistry(storetest.NewMemoryStoreBackend()) - store, err := storeReg.Get("test") - if err != nil { - t.Fatalf("Failed to access store: %v", err) - } + store := openTestStatestore() ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() @@ -132,58 +126,69 @@ func TestS3Poller(t *testing.T) { GetObject(gomock.Any(), gomock.Eq(bucket), gomock.Eq("2024-02-08T08:35:00+00:02.json.gz")). 
Return(nil, errFakeConnectivityFailure) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockAPI, nil, backupConfig{}, numberOfWorkers) - states, err := newStates(inputCtx, store) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockAPI, nil, backupConfig{}) + states, err := newStates(nil, store) require.NoError(t, err, "states creation must succeed") - receiver := newS3Poller(logp.NewLogger(inputName), nil, mockAPI, mockPublisher, s3ObjProc, states, bucket, "key", "region", "provider", numberOfWorkers, pollInterval) - require.Error(t, context.DeadlineExceeded, receiver.Poll(ctx)) + poller := &s3PollerInput{ + log: logp.NewLogger(inputName), + config: config{ + NumberOfWorkers: numberOfWorkers, + BucketListInterval: pollInterval, + BucketARN: bucket, + BucketListPrefix: "key", + RegionName: "region", + }, + s3: mockAPI, + client: mockPublisher, + s3ObjectHandler: s3ObjProc, + states: states, + provider: "provider", + metrics: newInputMetrics("", nil, 0), + } + poller.runPoll(ctx) }) t.Run("restart bucket scan after paging errors", func(t *testing.T) { // Change the restart limit to 2 consecutive errors, so the test doesn't // take too long to run readerLoopMaxCircuitBreaker = 2 - storeReg := statestore.NewRegistry(storetest.NewMemoryStoreBackend()) - store, err := storeReg.Get("test") - if err != nil { - t.Fatalf("Failed to access store: %v", err) - } + store := openTestStatestore() ctx, cancel := context.WithTimeout(context.Background(), testTimeout+pollInterval) defer cancel() ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() - mockAPI := NewMockS3API(ctrl) - mockPagerFirst := NewMockS3Pager(ctrl) - mockPagerSecond := NewMockS3Pager(ctrl) + mockS3 := NewMockS3API(ctrl) + mockErrorPager := NewMockS3Pager(ctrl) + mockSuccessPager := NewMockS3Pager(ctrl) mockPublisher := NewMockBeatClient(ctrl) gomock.InOrder( // Initial ListObjectPaginator gets an error. - mockAPI.EXPECT(). + mockS3.EXPECT(). 
ListObjectsPaginator(gomock.Eq(bucket), gomock.Eq("key")). Times(1). DoAndReturn(func(_, _ string) s3Pager { - return mockPagerFirst + return mockErrorPager }), // After waiting for pollInterval, it retries. - mockAPI.EXPECT(). + mockS3.EXPECT(). ListObjectsPaginator(gomock.Eq(bucket), gomock.Eq("key")). Times(1). DoAndReturn(func(_, _ string) s3Pager { - return mockPagerSecond + return mockSuccessPager }), ) // Initial Next gets an error. - mockPagerFirst.EXPECT(). + mockErrorPager.EXPECT(). HasMorePages(). Times(2). DoAndReturn(func() bool { return true }) - mockPagerFirst.EXPECT(). + mockErrorPager.EXPECT(). NextPage(gomock.Any()). Times(2). DoAndReturn(func(_ context.Context, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { @@ -191,13 +196,13 @@ func TestS3Poller(t *testing.T) { }) // After waiting for pollInterval, it retries. - mockPagerSecond.EXPECT(). + mockSuccessPager.EXPECT(). HasMorePages(). Times(1). DoAndReturn(func() bool { return true }) - mockPagerSecond.EXPECT(). + mockSuccessPager.EXPECT(). NextPage(gomock.Any()). Times(1). DoAndReturn(func(_ context.Context, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { @@ -232,37 +237,60 @@ func TestS3Poller(t *testing.T) { }, nil }) - mockPagerSecond.EXPECT(). + mockSuccessPager.EXPECT(). HasMorePages(). Times(1). DoAndReturn(func() bool { return false }) - mockAPI.EXPECT(). + mockS3.EXPECT(). GetObject(gomock.Any(), gomock.Eq(bucket), gomock.Eq("key1")). Return(nil, errFakeConnectivityFailure) - mockAPI.EXPECT(). + mockS3.EXPECT(). GetObject(gomock.Any(), gomock.Eq(bucket), gomock.Eq("key2")). Return(nil, errFakeConnectivityFailure) - mockAPI.EXPECT(). + mockS3.EXPECT(). GetObject(gomock.Any(), gomock.Eq(bucket), gomock.Eq("key3")). Return(nil, errFakeConnectivityFailure) - mockAPI.EXPECT(). + mockS3.EXPECT(). GetObject(gomock.Any(), gomock.Eq(bucket), gomock.Eq("key4")). Return(nil, errFakeConnectivityFailure) - mockAPI.EXPECT(). + mockS3.EXPECT(). 
GetObject(gomock.Any(), gomock.Eq(bucket), gomock.Eq("key5")). Return(nil, errFakeConnectivityFailure) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockAPI, nil, backupConfig{}, numberOfWorkers) - states, err := newStates(inputCtx, store) + s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3, nil, backupConfig{}) + states, err := newStates(nil, store) require.NoError(t, err, "states creation must succeed") - receiver := newS3Poller(logp.NewLogger(inputName), nil, mockAPI, mockPublisher, s3ObjProc, states, bucket, "key", "region", "provider", numberOfWorkers, pollInterval) - require.Error(t, context.DeadlineExceeded, receiver.Poll(ctx)) + poller := &s3PollerInput{ + log: logp.NewLogger(inputName), + config: config{ + NumberOfWorkers: numberOfWorkers, + BucketListInterval: pollInterval, + BucketARN: bucket, + BucketListPrefix: "key", + RegionName: "region", + }, + s3: mockS3, + client: mockPublisher, + s3ObjectHandler: s3ObjProc, + states: states, + provider: "provider", + metrics: newInputMetrics("", nil, 0), + } + poller.run(ctx) }) } + +func TestS3ReaderLoop(t *testing.T) { + +} + +func TestS3WorkerLoop(t *testing.T) { + +} diff --git a/x-pack/filebeat/input/awss3/sqs.go b/x-pack/filebeat/input/awss3/sqs.go index dd454a3bfb92..36985f73720d 100644 --- a/x-pack/filebeat/input/awss3/sqs.go +++ b/x-pack/filebeat/input/awss3/sqs.go @@ -7,111 +7,121 @@ package awss3 import ( "context" "errors" + "net/url" "strconv" - "sync" + "strings" "time" "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/aws/smithy-go" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/go-concert/timed" ) +type messageCountMonitor struct { + sqs sqsAPI + metrics *inputMetrics +} + const ( + sqsAccessDeniedErrorCode = "AccessDeniedException" sqsRetryDelay = 10 * time.Second sqsApproximateNumberOfMessages = "ApproximateNumberOfMessages" ) -type 
sqsReader struct { - maxMessagesInflight int - workerSem *awscommon.Sem - sqs sqsAPI - msgHandler sqsProcessor - log *logp.Logger - metrics *inputMetrics -} +var errBadQueueURL = errors.New("QueueURL is not in format: https://sqs.{REGION_ENDPOINT}.{ENDPOINT}/{ACCOUNT_NUMBER}/{QUEUE_NAME} or https://{VPC_ENDPOINT}.sqs.{REGION_ENDPOINT}.vpce.{ENDPOINT}/{ACCOUNT_NUMBER}/{QUEUE_NAME}") -func newSQSReader(log *logp.Logger, metrics *inputMetrics, sqs sqsAPI, maxMessagesInflight int, msgHandler sqsProcessor) *sqsReader { - if metrics == nil { - // Metrics are optional. Initialize a stub. - metrics = newInputMetrics("", nil, 0) - } - return &sqsReader{ - maxMessagesInflight: maxMessagesInflight, - workerSem: awscommon.NewSem(maxMessagesInflight), - sqs: sqs, - msgHandler: msgHandler, - log: log, - metrics: metrics, +func getRegionFromQueueURL(queueURL, endpoint string) string { + // get region from queueURL + // Example for sqs queue: https://sqs.us-east-1.amazonaws.com/12345678912/test-s3-logs + // Example for vpce: https://vpce-test.sqs.us-east-1.vpce.amazonaws.com/12345678912/sqs-queue + u, err := url.Parse(queueURL) + if err != nil { + return "" } -} -func (r *sqsReader) Receive(ctx context.Context) error { - // This loop tries to keep the workers busy as much as possible while - // honoring the max message cap as opposed to a simpler loop that receives - // N messages, waits for them all to finish, then requests N more messages. - var workerWg sync.WaitGroup - for ctx.Err() == nil { - // Determine how many SQS workers are available. - workers, err := r.workerSem.AcquireContext(r.maxMessagesInflight, ctx) - if err != nil { - break + // check for sqs queue url + host := strings.SplitN(u.Host, ".", 3) + if len(host) == 3 && host[0] == "sqs" { + if host[2] == endpoint || (endpoint == "" && strings.HasPrefix(host[2], "amazonaws.")) { + return host[1] } + } - // Receive (at most) as many SQS messages as there are workers. 
- msgs, err := r.sqs.ReceiveMessage(ctx, workers) - if err != nil { - r.workerSem.Release(workers) + // check for vpce url + host = strings.SplitN(u.Host, ".", 5) + if len(host) == 5 && host[1] == "sqs" { + if host[4] == endpoint || (endpoint == "" && strings.HasPrefix(host[4], "amazonaws.")) { + return host[2] + } + } - if ctx.Err() == nil { - r.log.Warnw("SQS ReceiveMessage returned an error. Will retry after a short delay.", "error", err) + return "" +} - // Throttle retries. - _ = timed.Wait(ctx, sqsRetryDelay) - } - continue +// readSQSMessages reads up to the requested number of SQS messages via +// ReceiveMessage. It always returns at least one result unless the +// context expires +func readSQSMessages( + ctx context.Context, + log *logp.Logger, + sqs sqsAPI, + metrics *inputMetrics, + count int, +) []types.Message { + if count <= 0 { + return nil + } + msgs, err := sqs.ReceiveMessage(ctx, count) + for (err != nil || len(msgs) == 0) && ctx.Err() == nil { + if err != nil { + log.Warnw("SQS ReceiveMessage returned an error. Will retry after a short delay.", "error", err) } - - // Release unused workers. - r.workerSem.Release(workers - len(msgs)) - - // Process each SQS message asynchronously with a goroutine. - r.log.Debugf("Received %v SQS messages.", len(msgs)) - r.metrics.sqsMessagesReceivedTotal.Add(uint64(len(msgs))) - workerWg.Add(len(msgs)) - - for _, msg := range msgs { - go func(msg types.Message, start time.Time) { - id := r.metrics.beginSQSWorker() - defer func() { - r.metrics.endSQSWorker(id) - workerWg.Done() - r.workerSem.Release(1) - }() - - if err := r.msgHandler.ProcessSQS(ctx, &msg); err != nil { - r.log.Warnw("Failed processing SQS message.", - "error", err, - "message_id", *msg.MessageId, - "elapsed_time_ns", time.Since(start)) - } - }(msg, time.Now()) + // Wait for the retry delay, but stop early if the context is cancelled. 
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-time.After(sqsRetryDelay):
+		}
+		msgs, err = sqs.ReceiveMessage(ctx, count)
 	}
+	log.Debugf("Received %v SQS messages.", len(msgs))
+	metrics.sqsMessagesReceivedTotal.Add(uint64(len(msgs)))
+	return msgs
+}
 
-	// Wait for all workers to finish.
-	workerWg.Wait()
+func (mcm messageCountMonitor) run(ctx context.Context) {
+	t := time.NewTicker(time.Minute)
+	defer t.Stop()
+	for {
+		if err := mcm.updateMessageCount(ctx); isSQSAuthError(err) {
+			// stop polling if auth error is encountered
+			// Set it back to -1 because there is a permission error
+			mcm.metrics.sqsMessagesWaiting.Set(int64(-1))
+			return
+		}
+		select {
+		case <-ctx.Done():
+			return
+		case <-t.C:
+		}
+	}
+}
 
-	if errors.Is(ctx.Err(), context.Canceled) {
-		// A canceled context is a normal shutdown.
-		return nil
+// updateMessageCount runs getApproximateMessageCount and updates the
+// sqsMessagesWaiting metric with the result.
+// If there is an error, the metric is left unchanged and the error is
+// returned for the caller to handle (run stops polling on auth errors).
+func (mcm messageCountMonitor) updateMessageCount(ctx context.Context) error {
+	count, err := mcm.getApproximateMessageCount(ctx)
+	if err == nil {
+		mcm.metrics.sqsMessagesWaiting.Set(int64(count))
 	}
-	return ctx.Err()
+	return err
 }
 
-func (r *sqsReader) GetApproximateMessageCount(ctx context.Context) (int, error) {
-	attributes, err := r.sqs.GetQueueAttributes(ctx, []types.QueueAttributeName{sqsApproximateNumberOfMessages})
+// Query the approximate message count for the queue via the SQS API.
+func (mcm messageCountMonitor) getApproximateMessageCount(ctx context.Context) (int, error) { + attributes, err := mcm.sqs.GetQueueAttributes(ctx, []types.QueueAttributeName{sqsApproximateNumberOfMessages}) if err == nil { if c, found := attributes[sqsApproximateNumberOfMessages]; found { if messagesCount, err := strconv.Atoi(c); err == nil { @@ -121,3 +131,11 @@ func (r *sqsReader) GetApproximateMessageCount(ctx context.Context) (int, error) } return -1, err } + +func isSQSAuthError(err error) bool { + var apiError smithy.APIError + if errors.As(err, &apiError) { + return apiError.ErrorCode() == sqsAccessDeniedErrorCode + } + return false +} diff --git a/x-pack/filebeat/input/awss3/sqs_input.go b/x-pack/filebeat/input/awss3/sqs_input.go new file mode 100644 index 000000000000..e524cf9fd1c7 --- /dev/null +++ b/x-pack/filebeat/input/awss3/sqs_input.go @@ -0,0 +1,259 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package awss3 + +import ( + "context" + "fmt" + "sync" + "time" + + awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + + v2 "github.com/elastic/beats/v7/filebeat/input/v2" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/elastic-agent-libs/logp" +) + +type sqsReaderInput struct { + config config + awsConfig awssdk.Config + sqs sqsAPI + s3 s3API + msgHandler sqsProcessor + log *logp.Logger + metrics *inputMetrics + + // The expected region based on the queue URL + detectedRegion string + + // Workers send on workRequestChan to indicate they're ready for the next + // message, and the reader loop replies on workResponseChan. 
+ workRequestChan chan struct{} + workResponseChan chan types.Message + + // workerWg is used to wait on worker goroutines during shutdown + workerWg sync.WaitGroup +} + +// Simple wrapper to handle creation of internal channels +func newSQSReaderInput(config config, awsConfig awssdk.Config) *sqsReaderInput { + return &sqsReaderInput{ + config: config, + awsConfig: awsConfig, + workRequestChan: make(chan struct{}, config.MaxNumberOfMessages), + workResponseChan: make(chan types.Message), + } +} + +func (in *sqsReaderInput) Name() string { return inputName } + +func (in *sqsReaderInput) Test(ctx v2.TestContext) error { + return nil +} + +func (in *sqsReaderInput) Run( + inputContext v2.Context, + pipeline beat.Pipeline, +) error { + // Initialize everything for this run + err := in.setup(inputContext, pipeline) + if err != nil { + return err + } + + // Start the main run loop + ctx := v2.GoContextFromCanceler(inputContext.Cancelation) + in.run(ctx) + in.cleanup() + + return nil +} + +// Apply internal initialization based on the parameters of Run, in +// preparation for calling run. setup and run are separate functions so +// tests can apply mocks and overrides before the run loop. +func (in *sqsReaderInput) setup( + inputContext v2.Context, + pipeline beat.Pipeline, +) error { + in.log = inputContext.Logger.With("queue_url", in.config.QueueURL) + + in.detectedRegion = getRegionFromQueueURL(in.config.QueueURL, in.config.AWSConfig.Endpoint) + if in.config.RegionName != "" { + in.awsConfig.Region = in.config.RegionName + } else if in.detectedRegion != "" { + in.awsConfig.Region = in.detectedRegion + } else { + // If we can't get a region from the config or the URL, return an error. 
+ return fmt.Errorf("failed to get AWS region from queue_url: %w", errBadQueueURL) + } + + in.sqs = &awsSQSAPI{ + client: sqs.NewFromConfig(in.awsConfig, in.config.sqsConfigModifier), + + queueURL: in.config.QueueURL, + apiTimeout: in.config.APITimeout, + visibilityTimeout: in.config.VisibilityTimeout, + longPollWaitTime: in.config.SQSWaitTime, + } + + in.s3 = &awsS3API{ + client: s3.NewFromConfig(in.awsConfig, in.config.s3ConfigModifier), + } + + in.metrics = newInputMetrics(inputContext.ID, nil, in.config.MaxNumberOfMessages) + + var err error + in.msgHandler, err = in.createEventProcessor(pipeline) + if err != nil { + return fmt.Errorf("failed to initialize sqs reader: %w", err) + } + return nil +} + +// Release internal resources created during setup (currently just metrics). +// This is its own function so tests can handle the run loop in isolation. +func (in *sqsReaderInput) cleanup() { + if in.metrics != nil { + in.metrics.Close() + } +} + +// Create the main goroutines for the input (workers, message count monitor) +// and begin the run loop. 
+func (in *sqsReaderInput) run(ctx context.Context) { + in.logConfigSummary() + + // Poll metrics periodically in the background + go messageCountMonitor{ + sqs: in.sqs, + metrics: in.metrics, + }.run(ctx) + + in.startWorkers(ctx) + in.readerLoop(ctx) + + in.workerWg.Wait() +} + +func (in *sqsReaderInput) readerLoop(ctx context.Context) { + // requestCount is the number of outstanding work requests that the + // reader will try to fulfill + requestCount := 0 + for ctx.Err() == nil { + // Block to wait for more requests if requestCount is zero + requestCount += channelRequestCount(ctx, in.workRequestChan, requestCount == 0) + + msgs := readSQSMessages(ctx, in.log, in.sqs, in.metrics, requestCount) + + for _, msg := range msgs { + select { + case <-ctx.Done(): + return + case in.workResponseChan <- msg: + requestCount-- + } + } + } +} + +func (in *sqsReaderInput) workerLoop(ctx context.Context) { + for ctx.Err() == nil { + // Send a work request + select { + case <-ctx.Done(): + // Shutting down + return + case in.workRequestChan <- struct{}{}: + } + // The request is sent, wait for a response + select { + case <-ctx.Done(): + return + case msg := <-in.workResponseChan: + start := time.Now() + + id := in.metrics.beginSQSWorker() + if err := in.msgHandler.ProcessSQS(ctx, &msg); err != nil { + in.log.Warnw("Failed processing SQS message.", + "error", err, + "message_id", *msg.MessageId, + "elapsed_time_ns", time.Since(start)) + } + in.metrics.endSQSWorker(id) + } + } +} + +func (in *sqsReaderInput) startWorkers(ctx context.Context) { + // Start the worker goroutines that will fetch messages via workRequestChan + // and workResponseChan until the input shuts down. 
+ for i := 0; i < in.config.MaxNumberOfMessages; i++ { + in.workerWg.Add(1) + go func() { + defer in.workerWg.Done() + in.workerLoop(ctx) + }() + } +} + +func (in *sqsReaderInput) logConfigSummary() { + log := in.log + log.Infof("AWS api_timeout is set to %v.", in.config.APITimeout) + log.Infof("AWS region is set to %v.", in.awsConfig.Region) + if in.awsConfig.Region != in.detectedRegion { + log.Warnf("configured region disagrees with queue_url region (%q != %q): using %q", in.awsConfig.Region, in.detectedRegion, in.awsConfig.Region) + } + log.Infof("AWS SQS visibility_timeout is set to %v.", in.config.VisibilityTimeout) + log.Infof("AWS SQS max_number_of_messages is set to %v.", in.config.MaxNumberOfMessages) + + if in.config.BackupConfig.GetBucketName() != "" { + log.Warnf("You have the backup_to_bucket functionality activated with SQS. Please make sure to set appropriate destination buckets " + + "or prefixes to avoid an infinite loop.") + } +} + +func (in *sqsReaderInput) createEventProcessor(pipeline beat.Pipeline) (sqsProcessor, error) { + fileSelectors := in.config.getFileSelectors() + s3EventHandlerFactory := newS3ObjectProcessorFactory(in.log.Named("s3"), in.metrics, in.s3, fileSelectors, in.config.BackupConfig) + + script, err := newScriptFromConfig(in.log.Named("sqs_script"), in.config.SQSScript) + if err != nil { + return nil, err + } + return newSQSS3EventProcessor(in.log.Named("sqs_s3_event"), in.metrics, in.sqs, script, in.config.VisibilityTimeout, in.config.SQSMaxReceiveCount, pipeline, s3EventHandlerFactory), nil +} + +// Read all pending requests and return their count. If block is true, +// waits until the result is at least 1, unless the context expires. +func channelRequestCount( + ctx context.Context, + requestChan chan struct{}, + block bool, +) int { + requestCount := 0 + if block { + // Wait until at least one request comes in. 
+ select { + case <-ctx.Done(): + return 0 + case <-requestChan: + requestCount++ + } + } + // Read as many requests as we can without blocking. + for { + select { + case <-requestChan: + requestCount++ + default: + return requestCount + } + } +} diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event.go b/x-pack/filebeat/input/awss3/sqs_s3_event.go index 7f95cf564c09..db893e443ac3 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event.go @@ -104,7 +104,6 @@ func newSQSS3EventProcessor( maxReceiveCount int, pipeline beat.Pipeline, s3 s3ObjectHandlerFactory, - maxWorkers int, ) *sqsS3EventProcessor { if metrics == nil { // Metrics are optional. Initialize a stub. diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go index 5ecd72fc4c91..65552525136d 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go @@ -50,7 +50,7 @@ func TestSQSS3EventProcessor(t *testing.T) { mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&msg)).Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) require.NoError(t, p.ProcessSQS(ctx, &msg)) }) @@ -73,7 +73,7 @@ func TestSQSS3EventProcessor(t *testing.T) { mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&invalidBodyMsg)).Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) err := p.ProcessSQS(ctx, &invalidBodyMsg) require.Error(t, err) t.Log(err) @@ -95,7 +95,7 @@ func TestSQSS3EventProcessor(t *testing.T) { 
mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&emptyRecordsMsg)).Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) require.NoError(t, p.ProcessSQS(ctx, &emptyRecordsMsg)) }) @@ -127,7 +127,7 @@ func TestSQSS3EventProcessor(t *testing.T) { mockS3Handler.EXPECT().FinalizeS3Object().Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockBeatPipeline, mockS3HandlerFactory) require.NoError(t, p.ProcessSQS(ctx, &msg)) }) @@ -150,7 +150,7 @@ func TestSQSS3EventProcessor(t *testing.T) { mockClient.EXPECT().Close(), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) err := p.ProcessSQS(ctx, &msg) t.Log(err) require.Error(t, err) @@ -181,7 +181,7 @@ func TestSQSS3EventProcessor(t *testing.T) { mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&msg)).Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) err := p.ProcessSQS(ctx, &msg) t.Log(err) require.Error(t, err) @@ -227,7 +227,7 @@ func TestSqsProcessor_keepalive(t *testing.T) { mockAPI.EXPECT().ChangeMessageVisibility(gomock.Any(), gomock.Eq(&msg), gomock.Eq(visibilityTimeout)). 
Times(1).Return(tc.Err) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockBeatPipeline, mockS3HandlerFactory, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockBeatPipeline, mockS3HandlerFactory) var wg sync.WaitGroup wg.Add(1) p.keepalive(ctx, p.log, &wg, &msg) @@ -239,7 +239,7 @@ func TestSqsProcessor_keepalive(t *testing.T) { func TestSqsProcessor_getS3Notifications(t *testing.T) { logp.TestingSetup() - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, nil, nil, time.Minute, 5, nil, nil, 5) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, nil, nil, time.Minute, 5, nil, nil) t.Run("s3 key is url unescaped", func(t *testing.T) { msg := newSQSMessage(newS3Event("Happy+Face.jpg")) diff --git a/x-pack/filebeat/input/awss3/sqs_test.go b/x-pack/filebeat/input/awss3/sqs_test.go index 5eda5d1885e2..cf82f03c6dec 100644 --- a/x-pack/filebeat/input/awss3/sqs_test.go +++ b/x-pack/filebeat/input/awss3/sqs_test.go @@ -12,11 +12,11 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sqs/types" "github.com/gofrs/uuid" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/logp" ) @@ -39,31 +39,35 @@ func TestSQSReceiver(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() - mockAPI := NewMockSQSAPI(ctrl) + mockSQS := NewMockSQSAPI(ctrl) mockMsgHandler := NewMockSQSProcessor(ctrl) msg := newSQSMessage(newS3Event("log.json")) - gomock.InOrder( - // Initial ReceiveMessage for maxMessages. - mockAPI.EXPECT(). - ReceiveMessage(gomock.Any(), gomock.Eq(maxMessages)). - Times(1). - DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { - // Return single message. 
- return []types.Message{msg}, nil - }), - - // Follow up ReceiveMessages for either maxMessages-1 or maxMessages - // depending on how long processing of previous message takes. - mockAPI.EXPECT(). - ReceiveMessage(gomock.Any(), gomock.Any()). - Times(1). - DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { - // Stop the test. - cancel() - return nil, nil - }), - ) + // Initial ReceiveMessage for maxMessages. + mockSQS.EXPECT(). + ReceiveMessage(gomock.Any(), gomock.Any()). + Times(1). + DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { + // Return single message. + return []types.Message{msg}, nil + }) + + // Follow up ReceiveMessages for either maxMessages-1 or maxMessages + // depending on how long processing of previous message takes. + mockSQS.EXPECT(). + ReceiveMessage(gomock.Any(), gomock.Any()). + Times(1). + DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { + // Stop the test. + cancel() + return nil, nil + }) + + mockSQS.EXPECT(). + GetQueueAttributes(gomock.Any(), gomock.Eq([]types.QueueAttributeName{sqsApproximateNumberOfMessages})). + DoAndReturn(func(_ context.Context, _ []types.QueueAttributeName) (map[string]string, error) { + return map[string]string{sqsApproximateNumberOfMessages: "10000"}, nil + }).AnyTimes() // Expect the one message returned to have been processed. mockMsgHandler.EXPECT(). @@ -72,9 +76,12 @@ func TestSQSReceiver(t *testing.T) { Return(nil) // Execute sqsReader and verify calls/state. 
- receiver := newSQSReader(logp.NewLogger(inputName), nil, mockAPI, maxMessages, mockMsgHandler) - require.NoError(t, receiver.Receive(ctx)) - assert.Equal(t, maxMessages, receiver.workerSem.Available()) + sqsReader := newSQSReaderInput(config{MaxNumberOfMessages: maxMessages}, aws.Config{}) + sqsReader.log = logp.NewLogger(inputName) + sqsReader.sqs = mockSQS + sqsReader.msgHandler = mockMsgHandler + sqsReader.metrics = newInputMetrics("", nil, 0) + sqsReader.run(ctx) }) t.Run("retry after ReceiveMessage error", func(t *testing.T) { @@ -83,50 +90,56 @@ func TestSQSReceiver(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() - mockAPI := NewMockSQSAPI(ctrl) + mockSQS := NewMockSQSAPI(ctrl) mockMsgHandler := NewMockSQSProcessor(ctrl) gomock.InOrder( // Initial ReceiveMessage gets an error. - mockAPI.EXPECT(). - ReceiveMessage(gomock.Any(), gomock.Eq(maxMessages)). + mockSQS.EXPECT(). + ReceiveMessage(gomock.Any(), gomock.Any()). Times(1). DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { return nil, errFakeConnectivityFailure }), // After waiting for sqsRetryDelay, it retries. - mockAPI.EXPECT(). - ReceiveMessage(gomock.Any(), gomock.Eq(maxMessages)). + mockSQS.EXPECT(). + ReceiveMessage(gomock.Any(), gomock.Any()). Times(1). DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { cancel() return nil, nil }), ) - - // Execute SQSReceiver and verify calls/state. - receiver := newSQSReader(logp.NewLogger(inputName), nil, mockAPI, maxMessages, mockMsgHandler) - require.NoError(t, receiver.Receive(ctx)) - assert.Equal(t, maxMessages, receiver.workerSem.Available()) + mockSQS.EXPECT(). + GetQueueAttributes(gomock.Any(), gomock.Eq([]types.QueueAttributeName{sqsApproximateNumberOfMessages})). 
+ DoAndReturn(func(_ context.Context, _ []types.QueueAttributeName) (map[string]string, error) { + return map[string]string{sqsApproximateNumberOfMessages: "10000"}, nil + }).AnyTimes() + + // Execute SQSReader and verify calls/state. + sqsReader := newSQSReaderInput(config{MaxNumberOfMessages: maxMessages}, aws.Config{}) + sqsReader.log = logp.NewLogger(inputName) + sqsReader.sqs = mockSQS + sqsReader.msgHandler = mockMsgHandler + sqsReader.metrics = newInputMetrics("", nil, 0) + sqsReader.run(ctx) }) } func TestGetApproximateMessageCount(t *testing.T) { logp.TestingSetup() - const maxMessages = 5 const count = 500 attrName := []types.QueueAttributeName{sqsApproximateNumberOfMessages} attr := map[string]string{"ApproximateNumberOfMessages": "500"} - t.Run("GetApproximateMessageCount success", func(t *testing.T) { + t.Run("getApproximateMessageCount success", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockAPI := NewMockSQSAPI(ctrl) - mockMsgHandler := NewMockSQSProcessor(ctrl) gomock.InOrder( mockAPI.EXPECT(). @@ -137,10 +150,10 @@ func TestGetApproximateMessageCount(t *testing.T) { }), ) - receiver := newSQSReader(logp.NewLogger(inputName), nil, mockAPI, maxMessages, mockMsgHandler) - receivedCount, err := receiver.GetApproximateMessageCount(ctx) + receivedCount, err := + messageCountMonitor{sqs: mockAPI}.getApproximateMessageCount(ctx) assert.Equal(t, count, receivedCount) - assert.Nil(t, err) + assert.NoError(t, err) }) t.Run("GetApproximateMessageCount error", func(t *testing.T) { @@ -151,7 +164,6 @@ func TestGetApproximateMessageCount(t *testing.T) { defer ctrl.Finish() mockAPI := NewMockSQSAPI(ctrl) - mockMsgHandler := NewMockSQSProcessor(ctrl) gomock.InOrder( mockAPI.EXPECT(). 
@@ -162,8 +174,7 @@ func TestGetApproximateMessageCount(t *testing.T) { }), ) - receiver := newSQSReader(logp.NewLogger(inputName), nil, mockAPI, maxMessages, mockMsgHandler) - receivedCount, err := receiver.GetApproximateMessageCount(ctx) + receivedCount, err := messageCountMonitor{sqs: mockAPI}.getApproximateMessageCount(ctx) assert.Equal(t, -1, receivedCount) assert.NotNil(t, err) }) @@ -222,3 +233,11 @@ func newS3Event(key string) s3EventV2 { record.S3.Object.Key = key return record } + +func TestSQSReaderLoop(t *testing.T) { + +} + +func TestSQSWorkerLoop(t *testing.T) { + +} diff --git a/x-pack/filebeat/input/awss3/states.go b/x-pack/filebeat/input/awss3/states.go index edbbcc73793e..cb40abbd41f0 100644 --- a/x-pack/filebeat/input/awss3/states.go +++ b/x-pack/filebeat/input/awss3/states.go @@ -5,14 +5,13 @@ package awss3 import ( + "fmt" "strings" "sync" - v2 "github.com/elastic/beats/v7/filebeat/input/v2" - - "github.com/elastic/elastic-agent-libs/logp" - + "github.com/elastic/beats/v7/filebeat/beater" "github.com/elastic/beats/v7/libbeat/statestore" + "github.com/elastic/elastic-agent-libs/logp" ) const awsS3ObjectStatePrefix = "filebeat::aws-s3::state::" @@ -20,8 +19,6 @@ const awsS3ObjectStatePrefix = "filebeat::aws-s3::state::" // states handles list of s3 object state. One must use newStates to instantiate a // file states registry. Using the zero-value is not safe. type states struct { - log *logp.Logger - // Completed S3 object states, indexed by state ID. // statesLock must be held to access states. states map[string]state @@ -34,13 +31,21 @@ type states struct { } // newStates generates a new states registry. 
-func newStates(ctx v2.Context, store *statestore.Store) (*states, error) { - states := &states{ - log: ctx.Logger.Named("states"), - states: map[string]state{}, - store: store, +func newStates(log *logp.Logger, stateStore beater.StateStore) (*states, error) { + store, err := stateStore.Access() + if err != nil { + return nil, fmt.Errorf("can't access persistent store: %w", err) } - return states, states.loadFromRegistry() + + stateTable, err := loadS3StatesFromRegistry(log, store) + if err != nil { + return nil, fmt.Errorf("loading S3 input state: %w", err) + } + + return &states{ + store: store, + states: stateTable, + }, nil } func (s *states) IsProcessed(state state) bool { @@ -51,8 +56,7 @@ func (s *states) IsProcessed(state state) bool { return ok } -func (s *states) AddState(state state) { - +func (s *states) AddState(state state) error { id := state.ID() // Update in-memory copy s.statesLock.Lock() @@ -61,18 +65,23 @@ func (s *states) AddState(state state) { // Persist to the registry s.storeLock.Lock() + defer s.storeLock.Unlock() key := awsS3ObjectStatePrefix + id if err := s.store.Set(key, state); err != nil { - s.log.Errorw("Failed to write states to the registry", "error", err) + return err } - s.storeLock.Unlock() + return nil } -func (s *states) loadFromRegistry() error { - states := map[string]state{} - +func (s *states) Close() { s.storeLock.Lock() - err := s.store.Each(func(key string, dec statestore.ValueDecoder) (bool, error) { + s.store.Close() + s.storeLock.Unlock() +} + +func loadS3StatesFromRegistry(log *logp.Logger, store *statestore.Store) (map[string]state, error) { + stateTable := map[string]state{} + err := store.Each(func(key string, dec statestore.ValueDecoder) (bool, error) { if !strings.HasPrefix(key, awsS3ObjectStatePrefix) { return true, nil } @@ -81,8 +90,9 @@ func (s *states) loadFromRegistry() error { var st state if err := dec.Decode(&st); err != nil { // Skip this key but continue iteration - s.log.Warnf("invalid S3 state 
loading object key %v", key) - //nolint:nilerr // One bad object shouldn't stop iteration + if log != nil { + log.Warnf("invalid S3 state loading object key %v", key) + } return true, nil } if !st.Stored && !st.Failed { @@ -93,17 +103,11 @@ func (s *states) loadFromRegistry() error { return true, nil } - states[st.ID()] = st + stateTable[st.ID()] = st return true, nil }) - s.storeLock.Unlock() if err != nil { - return err + return nil, err } - - s.statesLock.Lock() - s.states = states - s.statesLock.Unlock() - - return nil + return stateTable, nil } diff --git a/x-pack/filebeat/input/awss3/states_test.go b/x-pack/filebeat/input/awss3/states_test.go index 2f8bbf58fdfb..dc345d5f88e8 100644 --- a/x-pack/filebeat/input/awss3/states_test.go +++ b/x-pack/filebeat/input/awss3/states_test.go @@ -5,7 +5,6 @@ package awss3 import ( - "context" "testing" "time" @@ -15,9 +14,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - v2 "github.com/elastic/beats/v7/filebeat/input/v2" - "github.com/elastic/elastic-agent-libs/logp" ) type testInputStore struct { @@ -42,11 +38,6 @@ func (s *testInputStore) CleanupInterval() time.Duration { return 24 * time.Hour } -var inputCtx = v2.Context{ - Logger: logp.NewLogger("test"), - Cancelation: context.Background(), -} - func TestStatesAddStateAndIsProcessed(t *testing.T) { type stateTestCase struct { // An initialization callback to invoke on the (initially empty) states. 
@@ -117,17 +108,13 @@ func TestStatesAddStateAndIsProcessed(t *testing.T) { test := test t.Run(name, func(t *testing.T) { store := openTestStatestore() - persistentStore, err := store.Access() - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - states, err := newStates(inputCtx, persistentStore) + states, err := newStates(nil, store) require.NoError(t, err, "states creation must succeed") if test.statesEdit != nil { test.statesEdit(states) } if test.shouldReload { - states, err = newStates(inputCtx, persistentStore) + states, err = newStates(nil, store) require.NoError(t, err, "states creation must succeed") } diff --git a/x-pack/filebeat/input/cel/input.go b/x-pack/filebeat/input/cel/input.go index 88d7a20b458b..759809e6e80c 100644 --- a/x-pack/filebeat/input/cel/input.go +++ b/x-pack/filebeat/input/cel/input.go @@ -42,6 +42,7 @@ import ( "github.com/elastic/beats/v7/libbeat/monitoring/inputmon" "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/beats/v7/x-pack/filebeat/input/internal/httplog" + "github.com/elastic/beats/v7/x-pack/filebeat/input/internal/httpmon" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/monitoring" @@ -122,7 +123,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p cfg := src.cfg log := env.Logger.With("input_url", cfg.Resource.URL) - metrics := newInputMetrics(env.ID) + metrics, reg := newInputMetrics(env.ID) defer metrics.Close() ctx := ctxtool.FromCanceller(env.Cancelation) @@ -132,7 +133,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p cfg.Resource.Tracer.Filename = strings.ReplaceAll(cfg.Resource.Tracer.Filename, "*", id) } - client, trace, err := newClient(ctx, cfg, log) + client, trace, err := newClient(ctx, cfg, log, reg) if err != nil { return err } @@ -686,7 +687,7 @@ func getLimit(which string, rateLimit map[string]interface{}, log *logp.Logger) return 
limit, true } -func newClient(ctx context.Context, cfg config, log *logp.Logger) (*http.Client, *httplog.LoggingRoundTripper, error) { +func newClient(ctx context.Context, cfg config, log *logp.Logger, reg *monitoring.Registry) (*http.Client, *httplog.LoggingRoundTripper, error) { if !wantClient(cfg) { return nil, nil, nil } @@ -729,6 +730,10 @@ func newClient(ctx context.Context, cfg config, log *logp.Logger) (*http.Client, c.Transport = trace } + if reg != nil { + c.Transport = httpmon.NewMetricsRoundTripper(c.Transport, reg) + } + c.CheckRedirect = checkRedirect(cfg.Resource, log) if cfg.Resource.Retry.getMaxAttempts() > 1 { @@ -1070,7 +1075,7 @@ type inputMetrics struct { batchProcessingTime metrics.Sample // histogram of the elapsed successful batch processing times in nanoseconds (time of receipt to time of ACK for non-empty batches). } -func newInputMetrics(id string) *inputMetrics { +func newInputMetrics(id string) (*inputMetrics, *monitoring.Registry) { reg, unreg := inputmon.NewInputRegistry(inputName, id, nil) out := &inputMetrics{ unregister: unreg, @@ -1088,7 +1093,7 @@ func newInputMetrics(id string) *inputMetrics { _ = adapter.NewGoMetrics(reg, "batch_processing_time", adapter.Accept). 
Register("histogram", metrics.NewHistogram(out.batchProcessingTime)) - return out + return out, reg } func (m *inputMetrics) Close() { diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go index 01d2d7070254..558e277d1062 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go @@ -31,9 +31,10 @@ import ( const ( defaultAPIEndpoint = "https://graph.microsoft.com/v1.0" - defaultGroupsQuery = "$select=displayName,members" - defaultUsersQuery = "$select=accountEnabled,userPrincipalName,mail,displayName,givenName,surname,jobTitle,officeLocation,mobilePhone,businessPhones" - defaultDevicesQuery = "$select=accountEnabled,deviceId,displayName,operatingSystem,operatingSystemVersion,physicalIds,extensionAttributes,alternativeSecurityIds" + queryName = "$select" + defaultGroupsQuery = "displayName,members" + defaultUsersQuery = "accountEnabled,userPrincipalName,mail,displayName,givenName,surname,jobTitle,officeLocation,mobilePhone,businessPhones" + defaultDevicesQuery = "accountEnabled,deviceId,displayName,operatingSystem,operatingSystemVersion,physicalIds,extensionAttributes,alternativeSecurityIds" apiGroupType = "#microsoft.graph.group" apiUserType = "#microsoft.graph.user" @@ -353,21 +354,21 @@ func New(cfg *config.C, logger *logp.Logger, auth authenticator.Authenticator) ( if err != nil { return nil, fmt.Errorf("invalid groups URL endpoint: %w", err) } - groupsURL.RawQuery = url.QueryEscape(formatQuery(c.Select.GroupQuery, defaultGroupsQuery)) + groupsURL.RawQuery = formatQuery(queryName, c.Select.GroupQuery, defaultGroupsQuery) f.groupsURL = groupsURL.String() usersURL, err := url.Parse(f.conf.APIEndpoint + "/users/delta") if err != nil { return nil, fmt.Errorf("invalid users URL endpoint: %w", err) } - usersURL.RawQuery = 
url.QueryEscape(formatQuery(c.Select.UserQuery, defaultUsersQuery)) + usersURL.RawQuery = formatQuery(queryName, c.Select.UserQuery, defaultUsersQuery) f.usersURL = usersURL.String() devicesURL, err := url.Parse(f.conf.APIEndpoint + "/devices/delta") if err != nil { return nil, fmt.Errorf("invalid devices URL endpoint: %w", err) } - devicesURL.RawQuery = url.QueryEscape(formatQuery(c.Select.DeviceQuery, defaultDevicesQuery)) + devicesURL.RawQuery = formatQuery(queryName, c.Select.DeviceQuery, defaultDevicesQuery) f.devicesURL = devicesURL.String() // The API takes a departure from the query approach here, so we @@ -382,11 +383,12 @@ func New(cfg *config.C, logger *logp.Logger, auth authenticator.Authenticator) ( return &f, nil } -func formatQuery(query []string, dflt string) string { - if len(query) == 0 { - return dflt +func formatQuery(name string, query []string, dflt string) string { + q := dflt + if len(query) != 0 { + q = strings.Join(query, ",") } - return "$select=" + strings.Join(query, ",") + return url.Values{name: []string{q}}.Encode() } // newUserFromAPI translates an API-representation of a user to a fetcher.User. 
diff --git a/x-pack/filebeat/input/internal/httplog/roundtripper.go b/x-pack/filebeat/input/internal/httplog/roundtripper.go index 642245603f8f..ce68147a2a7d 100644 --- a/x-pack/filebeat/input/internal/httplog/roundtripper.go +++ b/x-pack/filebeat/input/internal/httplog/roundtripper.go @@ -115,14 +115,13 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err resp.Body, body, err = copyBody(resp.Body) if err != nil { errorsMessages = append(errorsMessages, fmt.Sprintf("failed to read response body: %s", err)) - } else { - respParts = append(respParts, - zap.ByteString("http.response.body.content", body[:min(len(body), rt.maxBodyLen)]), - zap.Bool("http.response.body.truncated", rt.maxBodyLen < len(body)), - zap.Int("http.response.body.bytes", len(body)), - zap.String("http.response.mime_type", resp.Header.Get("Content-Type")), - ) } + respParts = append(respParts, + zap.ByteString("http.response.body.content", body[:min(len(body), rt.maxBodyLen)]), + zap.Bool("http.response.body.truncated", rt.maxBodyLen < len(body)), + zap.Int("http.response.body.bytes", len(body)), + zap.String("http.response.mime_type", resp.Header.Get("Content-Type")), + ) message, err := httputil.DumpResponse(resp, false) if err != nil { errorsMessages = append(errorsMessages, fmt.Sprintf("failed to dump response: %s", err)) @@ -184,14 +183,13 @@ func logRequest(log *zap.Logger, req *http.Request, maxBodyLen int, extra ...zap req.Body, body, err = copyBody(req.Body) if err != nil { errorsMessages = append(errorsMessages, fmt.Sprintf("failed to read request body: %s", err)) - } else { - reqParts = append(reqParts, - zap.ByteString("http.request.body.content", body[:min(len(body), maxBodyLen)]), - zap.Bool("http.request.body.truncated", maxBodyLen < len(body)), - zap.Int("http.request.body.bytes", len(body)), - zap.String("http.request.mime_type", req.Header.Get("Content-Type")), - ) } + reqParts = append(reqParts, + zap.ByteString("http.request.body.content", 
body[:min(len(body), maxBodyLen)]), + zap.Bool("http.request.body.truncated", maxBodyLen < len(body)), + zap.Int("http.request.body.bytes", len(body)), + zap.String("http.request.mime_type", req.Header.Get("Content-Type")), + ) message, err := httputil.DumpRequestOut(req, false) if err != nil { errorsMessages = append(errorsMessages, fmt.Sprintf("failed to dump request: %s", err)) diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index f97bf89cb7e0..3b3a785a0ca5 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9 +FROM golang:1.21.10 RUN \ apt-get update \ diff --git a/x-pack/libbeat/common/aws/credentials.go b/x-pack/libbeat/common/aws/credentials.go index f6efde3e2b20..981547cb1dc7 100644 --- a/x-pack/libbeat/common/aws/credentials.go +++ b/x-pack/libbeat/common/aws/credentials.go @@ -56,7 +56,7 @@ type ConfigAWS struct { // InitializeAWSConfig function creates the awssdk.Config object from the provided config func InitializeAWSConfig(beatsConfig ConfigAWS) (awssdk.Config, error) { - awsConfig, _ := GetAWSCredentials(beatsConfig) + awsConfig, _ := getAWSCredentials(beatsConfig) if awsConfig.Region == "" { if beatsConfig.DefaultRegion != "" { awsConfig.Region = beatsConfig.DefaultRegion @@ -92,12 +92,12 @@ func InitializeAWSConfig(beatsConfig ConfigAWS) (awssdk.Config, error) { return awsConfig, nil } -// GetAWSCredentials function gets aws credentials from the config. +// getAWSCredentials function gets aws credentials from the config. // If access keys given, use them as credentials. // If access keys are not given, then load from AWS config file. If credential_profile_name is not // given, default profile will be used. // If role_arn is given, assume the IAM role either with access keys or default profile. 
-func GetAWSCredentials(beatsConfig ConfigAWS) (awssdk.Config, error) { +func getAWSCredentials(beatsConfig ConfigAWS) (awssdk.Config, error) { // Check if accessKeyID or secretAccessKey or sessionToken is given from configuration if beatsConfig.AccessKeyID != "" || beatsConfig.SecretAccessKey != "" || beatsConfig.SessionToken != "" { return getConfigForKeys(beatsConfig), nil @@ -110,17 +110,10 @@ func GetAWSCredentials(beatsConfig ConfigAWS) (awssdk.Config, error) { // Provided config must contain an accessKeyID, secretAccessKey and sessionToken to generate a valid CredentialsProfile func getConfigForKeys(beatsConfig ConfigAWS) awssdk.Config { config := awssdk.NewConfig() - awsCredentials := awssdk.Credentials{ - AccessKeyID: beatsConfig.AccessKeyID, - SecretAccessKey: beatsConfig.SecretAccessKey, - } - - if beatsConfig.SessionToken != "" { - awsCredentials.SessionToken = beatsConfig.SessionToken - } - - addStaticCredentialsProviderToAwsConfig(beatsConfig, config) - + config.Credentials = credentials.NewStaticCredentialsProvider( + beatsConfig.AccessKeyID, + beatsConfig.SecretAccessKey, + beatsConfig.SessionToken) return *config } @@ -172,15 +165,3 @@ func addAssumeRoleProviderToAwsConfig(config ConfigAWS, awsConfig *awssdk.Config } }) } - -// addStaticCredentialsProviderToAwsConfig adds a static credentials provider to the current AWS config by using the keys stored in Beats config -func addStaticCredentialsProviderToAwsConfig(beatsConfig ConfigAWS, awsConfig *awssdk.Config) { - logger := logp.NewLogger("addStaticCredentialsProviderToAwsConfig") - logger.Debug("Switching credentials provider to StaticCredentialsProvider") - staticCredentialsProvider := credentials.NewStaticCredentialsProvider( - beatsConfig.AccessKeyID, - beatsConfig.SecretAccessKey, - beatsConfig.SessionToken) - - awsConfig.Credentials = staticCredentialsProvider -} diff --git a/x-pack/libbeat/common/aws/credentials_test.go b/x-pack/libbeat/common/aws/credentials_test.go index 
43bbc642bc53..9f125c6301f4 100644 --- a/x-pack/libbeat/common/aws/credentials_test.go +++ b/x-pack/libbeat/common/aws/credentials_test.go @@ -41,7 +41,7 @@ func TestGetAWSCredentials(t *testing.T) { SecretAccessKey: "abc", SessionToken: "fake-session-token", } - awsConfig, err := GetAWSCredentials(inputConfig) + awsConfig, err := getAWSCredentials(inputConfig) assert.NoError(t, err) retrievedAWSConfig, err := awsConfig.Credentials.Retrieve(context.Background()) diff --git a/x-pack/libbeat/common/aws/semaphore.go b/x-pack/libbeat/common/aws/semaphore.go deleted file mode 100644 index 28343bcbd32e..000000000000 --- a/x-pack/libbeat/common/aws/semaphore.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package aws - -import ( - "context" - "sync" -) - -type Sem struct { - mutex *sync.Mutex - cond sync.Cond - available int -} - -func NewSem(n int) *Sem { - var m sync.Mutex - return &Sem{ - available: n, - mutex: &m, - cond: sync.Cond{ - L: &m, - }, - } -} - -func (s *Sem) AcquireContext(n int, ctx context.Context) (int, error) { - acquireC := make(chan int, 1) - go func() { - defer close(acquireC) - acquireC <- s.Acquire(n) - }() - - select { - case <-ctx.Done(): - return 0, ctx.Err() - case n := <-acquireC: - return n, nil - } -} - -func (s *Sem) Acquire(n int) int { - if n <= 0 { - return 0 - } - - s.mutex.Lock() - defer s.mutex.Unlock() - - if s.available == 0 { - s.cond.Wait() - } - - if n >= s.available { - rtn := s.available - s.available = 0 - return rtn - } - - s.available -= n - return n -} - -func (s *Sem) Release(n int) { - if n <= 0 { - return - } - - s.mutex.Lock() - defer s.mutex.Unlock() - - s.available += n - s.cond.Signal() -} - -func (s *Sem) Available() int { - s.mutex.Lock() - defer s.mutex.Unlock() - - return s.available -} 
diff --git a/x-pack/libbeat/common/aws/semaphore_test.go b/x-pack/libbeat/common/aws/semaphore_test.go deleted file mode 100644 index f91831ef8a0b..000000000000 --- a/x-pack/libbeat/common/aws/semaphore_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package aws - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSemaphore(t *testing.T) { - s := NewSem(5) - - assert.Equal(t, s.Acquire(5), 5) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - // Asks for 2, and blocks because 0 are available. - // It unblocks and returns 1 when Release(1) is called. - assert.Equal(t, s.Acquire(2), 1) - }() - - // None are available until Release(). - assert.Equal(t, s.Available(), 0) - - s.Release(1) - wg.Wait() -} diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 2f5b44963d3f..5023acb7b46a 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.21.9 AS build-env +FROM golang:1.21.10 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build .