From 67836dd0d117c246f1eb27c6b9438de4a2e0e4b7 Mon Sep 17 00:00:00 2001 From: Marethyu <45374460+Pythyu@users.noreply.github.com> Date: Mon, 27 Jan 2025 22:56:14 +0100 Subject: [PATCH 01/97] check_pkg_size should not run on on tagged commit (#33419) --- .gitlab-ci.yml | 4 +++- .gitlab/pkg_metrics/pkg_metrics.yml | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 233e6a4ecc2c3f..37bd03600c27d9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -620,9 +620,11 @@ workflow: - <<: *if_main_branch - <<: *if_release_branch -.not_on_release_branch: +.not_on_release_branch_or_tagged_commit: - <<: *if_release_branch when: never + - <<: *if_tagged_commit + when: never .only_main: - <<: *if_not_main_branch diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index 7c2b3f123568f4..5e0e78ee5527fc 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -85,7 +85,7 @@ check_pkg_size: - if: $CI_COMMIT_BRANCH == "main" when: on_success allow_failure: true - - !reference [.not_on_release_branch] + - !reference [.not_on_release_branch_or_tagged_commit] - !reference [.except_mergequeue] - when: on_success needs: From e97a3a04ff17452c0a5d3f84a49f317d3ba59d21 Mon Sep 17 00:00:00 2001 From: sabrina lu Date: Mon, 27 Jan 2025 18:17:14 -0500 Subject: [PATCH 02/97] add qualification phase for agent 6 (#33339) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .github/workflows/create_rc_pr.yml | 13 ++++- tasks/libs/releasing/version.py | 7 ++- tasks/release.py | 93 +++++++++++++++++++++++++----- 3 files changed, 94 insertions(+), 19 deletions(-) diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml index caba7218cb0d2a..413e4f1bbd67de 100644 --- a/.github/workflows/create_rc_pr.yml +++ b/.github/workflows/create_rc_pr.yml @@ -108,8 +108,14 @@ jobs: echo "CHANGES=$(inv -e 
release.check-for-changes -r "$MATRIX")" >> $GITHUB_OUTPUT fi + - name: Check if agent 6 is in qualification phase + if: ${{ env.IS_AGENT6_RELEASE == 'true' }} + run: | + is_qualification=$(inv -e release.is-qualification -r 6.53.x --output) + echo "IS_QUALIFICATION=$is_qualification" >> $GITHUB_ENV + - name: Create RC PR - if: ${{ steps.check_for_changes.outputs.CHANGES == 'true' || env.IS_AGENT6_RELEASE == 'true' }} + if: ${{ steps.check_for_changes.outputs.CHANGES == 'true' || ( env.IS_AGENT6_RELEASE == 'true' && env.IS_QUALIFICATION == 'false') }} env: MATRIX: ${{ matrix.value }} run: | @@ -118,3 +124,8 @@ jobs: else inv -e release.create-rc -r "$MATRIX" --slack-webhook=${{ secrets.AGENT_RELEASE_SYNC_SLACK_WEBHOOK }} fi + + - name: Rebuild agent 6 RC pipeline if no changes and in qualification phase + if: ${{ env.IS_AGENT6_RELEASE == 'true' && steps.check_for_changes.outputs.CHANGES == 'false' && env.IS_QUALIFICATION == 'true' }} + run: | + inv -e release.run-rc-pipeline -r 6.53.x diff --git a/tasks/libs/releasing/version.py b/tasks/libs/releasing/version.py index 85e66ea283080d..762601fb4194d5 100644 --- a/tasks/libs/releasing/version.py +++ b/tasks/libs/releasing/version.py @@ -27,10 +27,13 @@ VERSION_RE = re.compile(r'(v)?(\d+)[.](\d+)([.](\d+))?(-devel)?(-rc\.(\d+))?') # Regex matching rc version tag format like 7.50.0-rc.1 -RC_VERSION_RE = re.compile(r'\d+[.]\d+[.]\d+-rc\.\d+') +RC_VERSION_RE = re.compile(r'^\d+[.]\d+[.]\d+-rc\.\d+$') + +# Regex matching final version tag format like 7.54.0 +FINAL_VERSION_RE = re.compile(r'^\d+[.]\d+[.]\d+$') # Regex matching minor release rc version tag like x.y.0-rc.1 (semver PATCH == 0), but not x.y.1-rc.1 (semver PATCH > 0) -MINOR_RC_VERSION_RE = re.compile(r'\d+[.]\d+[.]0-rc\.\d+') +MINOR_RC_VERSION_RE = re.compile(r'^\d+[.]\d+[.]0-rc\.\d+$') # Regex matching the git describe output DESCRIBE_PATTERN = re.compile(r"^.*-(?P\d+)-g[0-9a-f]+$") diff --git a/tasks/release.py b/tasks/release.py index 
5eebde56baec2d..4f1bfe8e13a907 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -17,7 +17,7 @@ from time import sleep from gitlab import GitlabError -from invoke import task +from invoke import Failure, task from invoke.exceptions import Exit from tasks.libs.ciproviders.github_api import GithubAPI, create_release_pr @@ -65,6 +65,7 @@ update_release_json, ) from tasks.libs.releasing.version import ( + FINAL_VERSION_RE, MINOR_RC_VERSION_RE, RC_VERSION_RE, VERSION_RE, @@ -85,6 +86,7 @@ BACKPORT_LABEL_COLOR = "5319e7" TAG_BATCH_SIZE = 3 +QUALIFICATION_TAG = "qualification" @task @@ -148,10 +150,11 @@ def __get_force_option(force: bool) -> str: return force_option -def __tag_single_module(ctx, module, agent_version, commit, force_option, devel): +def __tag_single_module(ctx, module, tag_name, commit, force_option, devel): """Tag a given module.""" tags = [] - for tag in module.tag(agent_version): + tags_to_commit = module.tag(tag_name) if VERSION_RE.match(tag_name) else [tag_name] + for tag in tags_to_commit: if devel: tag += "-devel" @@ -210,7 +213,15 @@ def tag_modules( @task def tag_version( - ctx, release_branch=None, commit="HEAD", push=True, force=False, devel=False, version=None, trust=False + ctx, + release_branch=None, + commit="HEAD", + push=True, + force=False, + devel=False, + version=None, + trust=False, + start_qual=False, ): """Create tags for a given Datadog Agent version. @@ -220,6 +231,7 @@ def tag_version( push: Will push the tags to the origin remote (on by default). force: Will allow the task to overwrite existing tags. Needed to move existing tags (off by default). 
devel: Will create -devel tags (used after creation of the release branch) + start_qual: Will start the qualification phase for agent 6 release candidate by adding a qualification tag Examples: $ inv -e release.tag-version 7.27.x # Create tags and push them to origin @@ -236,6 +248,16 @@ def tag_version( with agent_context(ctx, release_branch, skip_checkout=release_branch is None): tags = __tag_single_module(ctx, get_default_modules()["."], agent_version, commit, force_option, devel) + # create or update the qualification tag using the force option (points tag to next RC) + if is_agent6(ctx) and (start_qual or is_qualification(ctx, "6.53.x")): + if FINAL_VERSION_RE.match(agent_version): + ctx.run(f"git push --delete origin {QUALIFICATION_TAG}") + else: + force_option = __get_force_option(not start_qual) + tags += __tag_single_module( + ctx, get_default_modules()["."], QUALIFICATION_TAG, commit, force_option, False + ) + if push: tags_list = ' '.join(tags) ctx.run(f"git push origin {tags_list}{force_option}") @@ -472,7 +494,21 @@ def create_rc(ctx, release_branch, patch_version=False, upstream="origin", slack @task -def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False): +def is_qualification(ctx, release_branch, output=False): + with agent_context(ctx, release_branch): + try: + ctx.run(f"git tag | grep {QUALIFICATION_TAG}", hide=True) + if output: + print('true') + return True + except Failure: + if output: + print("false") + return False + + +@task +def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False, start_qual=False): """To be done after the PR created by release.create-rc is merged, with the same options as release.create-rc. @@ -481,6 +517,7 @@ def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False): Args: k8s_deployments: When set to True the child pipeline deploying to subset of k8s staging clusters will be triggered. 
+ start_qual: Start the qualification phase for agent 6 release candidates. """ major_version = get_version_major(release_branch) @@ -530,7 +567,7 @@ def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False): # tag_version only takes the highest version (Agent 7 currently), and creates # the tags for all supported versions # TODO: make it possible to do Agent 6-only or Agent 7-only tags? - tag_version(ctx, version=str(new_version), force=False) + tag_version(ctx, version=str(new_version), force=False, start_qual=start_qual) tag_modules(ctx, version=str(new_version), force=False) print(color_message(f"Waiting until the {new_version} tag appears in Gitlab", "bold")) @@ -546,16 +583,40 @@ def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False): print(color_message("Creating RC pipeline", "bold")) # Step 2: Run the RC pipeline + run_rc_pipeline(release_branch, gitlab_tag.name, k8s_deployments) - run( - ctx, - git_ref=gitlab_tag.name, - use_release_entries=True, - repo_branch="beta", - deploy=True, - rc_build=True, - rc_k8s_deployments=k8s_deployments, - ) + +def get_qualification_rc_tag(ctx, release_branch): + with agent_context(ctx, release_branch): + err_msg = "Error: Expected exactly one release candidate tag associated with the qualification tag commit. 
Tags found:" + try: + res = ctx.run(f"git tag --points-at $(git rev-list -n 1 {QUALIFICATION_TAG}) | grep 6.53") + except Failure as err: + raise Exit(message=f"{err_msg} []", code=1) from err + + tags = [tag for tag in res.stdout.split("\n") if tag.strip()] + if len(tags) > 1: + raise Exit(message=f"{err_msg} {tags}", code=1) + if not RC_VERSION_RE.match(tags[0]): + raise Exit(message=f"Error: The tag '{tags[0]}' does not match expected release candidate pattern", code=1) + + return tags[0] + + +@task +def run_rc_pipeline(ctx, release_branch, gitlab_tag=None, k8s_deployments=False): + if not gitlab_tag: + gitlab_tag = get_qualification_rc_tag(ctx, release_branch) + + run( + ctx, + git_ref=gitlab_tag, + use_release_entries=True, + repo_branch="beta", + deploy=True, + rc_build=True, + rc_k8s_deployments=k8s_deployments, + ) @task(help={'key': "Path to an existing release.json key, separated with double colons, eg. 'last_stable::6'"}) @@ -1269,7 +1330,7 @@ def check_previous_agent6_rc(ctx): err_msg += agent6_prs response = get_ci_pipeline_events( - 'ci_level:pipeline @ci.pipeline.name:"DataDog/datadog-agent" @git.tag:6.53.* -@ci.pipeline.downstream:true', + 'ci_level:pipeline @ci.pipeline.name:"DataDog/datadog-agent" @git.tag:6.53.* -@ci.pipeline.downstream:true -@ci.partial_pipeline:retry', 7, ) if not response.data: From bd9d797a10cfe6bb6f841f56c7d6f4ae4616f95a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Tue, 28 Jan 2025 04:13:57 -0500 Subject: [PATCH 03/97] [ADXT-861] RFC - Adding Log Parsing to TestWasher (flakes.yaml) (#33185) --- flakes.yaml | 34 +++++++++----- pkg/util/testutil/flake/parse.go | 11 ++++- pkg/util/testutil/flake/parse_test.go | 41 +++++++++++++--- tasks/testwasher.py | 56 ++++++++++++++++++++-- tasks/unit_tests/testdata/flakes_1.yaml | 2 +- tasks/unit_tests/testdata/flakes_2.yaml | 2 +- tasks/unit_tests/testdata/flakes_3.yaml | 2 +- tasks/unit_tests/testdata/flakes_4.yaml 
| 2 +- tasks/unit_tests/testdata/flakes_5.yaml | 3 ++ tasks/unit_tests/testdata/flakes_6.yaml | 3 ++ tasks/unit_tests/testdata/flakes_7.yaml | 4 ++ tasks/unit_tests/testdata/flakes_8.yaml | 5 ++ tasks/unit_tests/testdata/flakes_9.yaml | 1 + tasks/unit_tests/testwasher_tests.py | 62 +++++++++++++++++++++++++ 14 files changed, 200 insertions(+), 28 deletions(-) create mode 100644 tasks/unit_tests/testdata/flakes_5.yaml create mode 100644 tasks/unit_tests/testdata/flakes_6.yaml create mode 100644 tasks/unit_tests/testdata/flakes_7.yaml create mode 100644 tasks/unit_tests/testdata/flakes_8.yaml create mode 100644 tasks/unit_tests/testdata/flakes_9.yaml diff --git a/flakes.yaml b/flakes.yaml index da655d3c2da03a..0363b21c60861f 100644 --- a/flakes.yaml +++ b/flakes.yaml @@ -1,15 +1,27 @@ -# Pkg Name: Test list -# If you mute a parent test it will ignore all the subtests as well -# For example: +# * Here is the file format: +# : +# - test: # Mark this test always flaky +# - test: +# on-log: # Mark this test flaky if the pattern (regex) is found within its log +# - test: +# on-log: # Mark this test flaky if any of the patterns are found within its log +# - +# - +# It is also possible to specify a log pattern for all tests: +# on-log: +# - "I'm flaky..." +# * For example: # "pkg/gohai": -# - "TestGetPayload" -# "test/new-e2e/tests/agent-platform/install-script" -# - "TestInstallScript/test_install_script_on_centos-79_x86_64_datadog-agent_agent_7" +# - test: "TestGetPayload" +# "test/new-e2e/tests/agent-platform/install-script": +# - test: "TestInstallScript/test_install_script_on_centos-79_x86_64_datadog-agent_agent_7" +# * Note: +# If you mute a parent test it will ignore all the subtests as well. 
# TODO: https://datadoghq.atlassian.net/browse/CONTINT-4143 test/new-e2e/tests/containers: - - TestECSSuite/TestCPU/metric___container.cpu.usage{^ecs_container_name:stress-ng$} - - TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - - TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - - TestKindSuite/TestAdmissionControllerWithAutoDetectedLanguage - - TestEKSSuite/TestAdmissionControllerWithAutoDetectedLanguage + - test: TestECSSuite/TestCPU/metric___container.cpu.usage{^ecs_container_name:stress-ng$} + - test: TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} + - test: TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} + - test: TestKindSuite/TestAdmissionControllerWithAutoDetectedLanguage + - test: TestEKSSuite/TestAdmissionControllerWithAutoDetectedLanguage diff --git a/pkg/util/testutil/flake/parse.go b/pkg/util/testutil/flake/parse.go index f56041c449f024..43135976f4f58c 100644 --- a/pkg/util/testutil/flake/parse.go +++ b/pkg/util/testutil/flake/parse.go @@ -56,14 +56,21 @@ func (k *KnownFlakyTests) IsFlaky(pkg string, testName string) bool { // Parse parses the reader in the flake.yaml format func Parse(r io.Reader) (*KnownFlakyTests, error) { dec := yaml.NewDecoder(r) - pkgToTests := make(map[string][]string) + pkgToTests := make(map[string][]map[string]string) if err := dec.Decode(&pkgToTests); err != nil { return nil, fmt.Errorf("unmarshal: %w", err) } kf := &KnownFlakyTests{packageTestList: make(map[string]map[string]struct{})} for pkg, tests := range pkgToTests { for _, t := range tests { - kf.Add(pkg, t) + if _, ok := t["test"]; !ok { + return nil, fmt.Errorf("test field is required for %s", pkg) + } + + // Do not include tests that have an on-log field since they are not always flaky + if _, 
hasOnLog := t["on-log"]; !hasOnLog { + kf.Add(pkg, t["test"]) + } } } return kf, nil diff --git a/pkg/util/testutil/flake/parse_test.go b/pkg/util/testutil/flake/parse_test.go index 38a55f62bcb28e..2d8eec23057451 100644 --- a/pkg/util/testutil/flake/parse_test.go +++ b/pkg/util/testutil/flake/parse_test.go @@ -26,17 +26,30 @@ func TestIsFlaky(t *testing.T) { } const flake1 = `pkg/gohai: - - TestGetPayload` + - test: TestGetPayload` const flake2 = `pkg/toto: - - TestGetPayload - - TestOtherTest` + - test: TestGetPayload + - test: TestOtherTest` const flake3 = `pkg/gohai: - - TestGetPayload + - test: TestGetPayload pkg/toto: - - TestGetPayload - - TestOtherTest` + - test: TestGetPayload + - test: TestOtherTest` + +const flake4 = `pkg/gohai: + - test: TestGetPayload +pkg/toto: + - test: TestGetPayload + - test: TestOtherTest + on-log: "hello"` + +const flakeError = `pkg/gohai: + - test: TestGetPayload +pkg/toto: + - test: TestGetPayload + - on-log: "hello"` func TestFlakesParse(t *testing.T) { t.Run("1", func(t *testing.T) { @@ -67,4 +80,20 @@ func TestFlakesParse(t *testing.T) { assert.Contains(t, kf.packageTestList["pkg/toto"], "TestOtherTest") } }) + + t.Run("4", func(t *testing.T) { + kf, err := Parse(bytes.NewBuffer([]byte(flake4))) + require.NoError(t, err) + if assert.Contains(t, kf.packageTestList, "pkg/gohai") { + assert.Contains(t, kf.packageTestList["pkg/gohai"], "TestGetPayload") + } + if assert.Contains(t, kf.packageTestList, "pkg/toto") { + assert.Contains(t, kf.packageTestList["pkg/toto"], "TestGetPayload") + } + }) + + t.Run("5", func(t *testing.T) { + _, err := Parse(bytes.NewBuffer([]byte(flakeError))) + require.Error(t, err) + }) } diff --git a/tasks/testwasher.py b/tasks/testwasher.py index 519d063c0e4607..853bb13da79947 100644 --- a/tasks/testwasher.py +++ b/tasks/testwasher.py @@ -3,6 +3,7 @@ import copy import json import os +import re from collections import defaultdict import yaml @@ -29,6 +30,10 @@ def __init__( self.flaky_test_indicator = 
flaky_test_indicator self.flakes_file_path = flakes_file_path self.known_flaky_tests = defaultdict(set) + # flaky_log_patterns[package][test] = [pattern1, pattern2...] + self.flaky_log_patterns = defaultdict(dict) + # Top level `on-log` used to have a pattern for every test + self.flaky_log_main_patterns = [] self.parse_flaky_file() @@ -68,12 +73,33 @@ def parse_flaky_file(self): """ Parse the flakes.yaml file and add the tests listed there to the kown flaky tests list """ + reserved_keywords = ("on-log",) + with open(self.flakes_file_path) as f: flakes = yaml.safe_load(f) - if not flakes: - return - for package, tests in flakes.items(): - self.known_flaky_tests[f"github.com/DataDog/datadog-agent/{package}"].update(set(tests)) + + if not flakes: + return + + # Add the tests to the known flaky tests list + for package, tests in flakes.items(): + if package in reserved_keywords: + continue + + for test in tests: + if 'on-log' in test: + patterns = test['on-log'] + if isinstance(patterns, str): + patterns = [patterns] + self.flaky_log_patterns[f"github.com/DataDog/datadog-agent/{package}"][test['test']] = patterns + else: + # If there is no `on-log`, we consider it as a known flaky test right away + self.known_flaky_tests[f"github.com/DataDog/datadog-agent/{package}"].add(test['test']) + + # on-log patterns at the top level + self.flaky_log_main_patterns = flakes.get('on-log', []) + if isinstance(self.flaky_log_main_patterns, str): + self.flaky_log_main_patterns = [self.flaky_log_main_patterns] def parse_test_results(self, module_path: str) -> tuple[dict, dict]: failing_tests = defaultdict(set) @@ -93,10 +119,30 @@ def parse_test_results(self, module_path: str) -> tuple[dict, dict]: if "Output" in test_result and "panic:" in test_result["Output"]: failing_tests[test_result["Package"]].add(test_result["Test"]) - if "Output" in test_result and self.flaky_test_indicator in test_result["Output"]: + if "Output" in test_result and self.is_flaky_from_log( + 
test_result["Package"], test_result["Test"], test_result["Output"] + ): flaky_marked_tests[test_result["Package"]].add(test_result["Test"]) return failing_tests, flaky_marked_tests + def is_flaky_from_log(self, package: str, test: str, log: str) -> bool: + """Returns whether the test is flaky based on the log output.""" + + if self.flaky_test_indicator in log: + return True + + # Check if the log contains any of the flaky patterns + patterns = self.flaky_log_main_patterns + + if test in self.flaky_log_patterns[package]: + patterns += self.flaky_log_patterns[package][test] + + for pattern in patterns: + if re.search(pattern, log, re.IGNORECASE): + return True + + return False + def process_module_results(self, module_results: list[ModuleTestResult]): """ Process the module test results and decide whether we should succeed or not. diff --git a/tasks/unit_tests/testdata/flakes_1.yaml b/tasks/unit_tests/testdata/flakes_1.yaml index da2fca4ead4c34..49e925be0e19fe 100644 --- a/tasks/unit_tests/testdata/flakes_1.yaml +++ b/tasks/unit_tests/testdata/flakes_1.yaml @@ -1,2 +1,2 @@ pkg/gohai: - - TestGetPayload + - test: TestGetPayload diff --git a/tasks/unit_tests/testdata/flakes_2.yaml b/tasks/unit_tests/testdata/flakes_2.yaml index 0b944cf9f52926..f68cadcf8e0f0b 100644 --- a/tasks/unit_tests/testdata/flakes_2.yaml +++ b/tasks/unit_tests/testdata/flakes_2.yaml @@ -1,2 +1,2 @@ pkg/toto: - - TestGetPayload + - test: TestGetPayload diff --git a/tasks/unit_tests/testdata/flakes_3.yaml b/tasks/unit_tests/testdata/flakes_3.yaml index 6b83f4a0a7006e..03b53015faf463 100644 --- a/tasks/unit_tests/testdata/flakes_3.yaml +++ b/tasks/unit_tests/testdata/flakes_3.yaml @@ -1,2 +1,2 @@ test/new-e2e/tests/containers: - - TestEKSSuite/Ifail + - test: TestEKSSuite/Ifail diff --git a/tasks/unit_tests/testdata/flakes_4.yaml b/tasks/unit_tests/testdata/flakes_4.yaml index faacbb2ef6cadf..4a057959f84a13 100644 --- a/tasks/unit_tests/testdata/flakes_4.yaml +++ 
b/tasks/unit_tests/testdata/flakes_4.yaml @@ -1,2 +1,2 @@ pkg/serverless/trace: - - TestLoadConfigShouldBeFast + - test: TestLoadConfigShouldBeFast diff --git a/tasks/unit_tests/testdata/flakes_5.yaml b/tasks/unit_tests/testdata/flakes_5.yaml new file mode 100644 index 00000000000000..b933121d461153 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_5.yaml @@ -0,0 +1,3 @@ +pkg/serverless/trace: + - test: TestLoadConfigShouldBeFast + on-log: "This text won't be found in the log" diff --git a/tasks/unit_tests/testdata/flakes_6.yaml b/tasks/unit_tests/testdata/flakes_6.yaml new file mode 100644 index 00000000000000..fc3c0a066682b7 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_6.yaml @@ -0,0 +1,3 @@ +pkg/serverless/trace: + - test: TestLoadConfigShouldBeFast + on-log: "panic.*toto" diff --git a/tasks/unit_tests/testdata/flakes_7.yaml b/tasks/unit_tests/testdata/flakes_7.yaml new file mode 100644 index 00000000000000..ef0eaf37b2a285 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_7.yaml @@ -0,0 +1,4 @@ +pkg/serverless/trace: + - test: TestLoadConfigShouldBeFast + on-log: + - "panic.*toto" diff --git a/tasks/unit_tests/testdata/flakes_8.yaml b/tasks/unit_tests/testdata/flakes_8.yaml new file mode 100644 index 00000000000000..6001cde9ecde02 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_8.yaml @@ -0,0 +1,5 @@ +pkg/serverless/trace: + - test: TestLoadConfigShouldBeFast + on-log: + - "panic2.*toto" + - "panic.*toto" diff --git a/tasks/unit_tests/testdata/flakes_9.yaml b/tasks/unit_tests/testdata/flakes_9.yaml new file mode 100644 index 00000000000000..a89bb5766ab7b9 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_9.yaml @@ -0,0 +1 @@ +on-log: "panic.*toto" diff --git a/tasks/unit_tests/testwasher_tests.py b/tasks/unit_tests/testwasher_tests.py index 6cd062d4ee0cda..7811fa37dc8c18 100644 --- a/tasks/unit_tests/testwasher_tests.py +++ b/tasks/unit_tests/testwasher_tests.py @@ -109,6 +109,68 @@ def test_flaky_panicking_flakesyaml_test(self): ) 
self.assertEqual(non_flaky_failing_tests, {}) + def test_flaky_on_log(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_5.yaml", + ) + module_path = "tasks/unit_tests/testdata" + _, marked_flaky_tests = test_washer.parse_test_results(module_path) + flaky_tests = test_washer.merge_known_flakes(marked_flaky_tests) + self.assertEqual(flaky_tests, {}) + + def test_flaky_on_log2(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_6.yaml", + ) + module_path = "tasks/unit_tests/testdata" + _, marked_flaky_tests = test_washer.parse_test_results(module_path) + flaky_tests = test_washer.merge_known_flakes(marked_flaky_tests) + self.assertEqual( + flaky_tests, + {'github.com/DataDog/datadog-agent/pkg/serverless/trace': {'TestLoadConfigShouldBeFast'}}, + ) + + def test_flaky_on_log3(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_7.yaml", + ) + module_path = "tasks/unit_tests/testdata" + _, marked_flaky_tests = test_washer.parse_test_results(module_path) + flaky_tests = test_washer.merge_known_flakes(marked_flaky_tests) + self.assertEqual( + flaky_tests, + {'github.com/DataDog/datadog-agent/pkg/serverless/trace': {'TestLoadConfigShouldBeFast'}}, + ) + + def test_flaky_on_log4(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_8.yaml", + ) + module_path = "tasks/unit_tests/testdata" + _, marked_flaky_tests = test_washer.parse_test_results(module_path) + flaky_tests = test_washer.merge_known_flakes(marked_flaky_tests) + self.assertEqual( + flaky_tests, + {'github.com/DataDog/datadog-agent/pkg/serverless/trace': {'TestLoadConfigShouldBeFast'}}, + ) + + def test_flaky_on_log5(self): + test_washer = 
TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_9.yaml", + ) + module_path = "tasks/unit_tests/testdata" + _, marked_flaky_tests = test_washer.parse_test_results(module_path) + flaky_tests = test_washer.merge_known_flakes(marked_flaky_tests) + self.assertEqual( + flaky_tests, + {'github.com/DataDog/datadog-agent/pkg/serverless/trace': {'TestLoadConfigShouldBeFast'}}, + ) + class TestMergeKnownFlakes(unittest.TestCase): def test_with_shared_keys(self): From 1c505b754d84cefcea4eb49e269e4bd619a0743a Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Tue, 28 Jan 2025 10:31:32 +0100 Subject: [PATCH 04/97] [CWS] use iterator name as cache key (#33418) --- .../generators/accessors/accessors.tmpl | 2 +- pkg/security/secl/compiler/eval/context.go | 20 +- pkg/security/secl/model/accessors_unix.go | 516 +++++++++--------- pkg/security/secl/model/accessors_windows.go | 26 +- pkg/security/secl/model/string_array_iter.go | 12 +- pkg/security/secl/rules/eval_test.go | 65 +++ pkg/security/seclwin/model/accessors_win.go | 26 +- .../seclwin/model/string_array_iter.go | 12 +- 8 files changed, 372 insertions(+), 307 deletions(-) create mode 100644 pkg/security/secl/rules/eval_test.go diff --git a/pkg/security/generators/accessors/accessors.tmpl b/pkg/security/generators/accessors/accessors.tmpl index 5845cc4931c8b7..4a197038a66bd9 100644 --- a/pkg/security/generators/accessors/accessors.tmpl +++ b/pkg/security/generators/accessors/accessors.tmpl @@ -138,7 +138,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{if $Field.GetArrayPrefix}} {{$AncestorFunc = "newAncestorsIteratorArray"}} {{end}} - results := {{$AncestorFunc}}(iterator, field, ctx, {{$Event}}, func(ev *Event, current *{{$Field.Iterator.OrigType}}) {{$Field.GetArrayPrefix}}{{$Field.ReturnType}} { + results := {{$AncestorFunc}}(iterator, "{{$Field.Iterator.Name}}", ctx, {{$Event}}, func(ev *Event, current 
*{{$Field.Iterator.OrigType}}) {{$Field.GetArrayPrefix}}{{$Field.ReturnType}} { {{range $Check := $Checks}} {{if $Field.Iterator.Name | HasPrefix $Check}} {{$SubName := $Field.Iterator.Name | TrimPrefix $Check}} diff --git a/pkg/security/secl/compiler/eval/context.go b/pkg/security/secl/compiler/eval/context.go index 473c4480e6a1c2..b5cd17a1450ead 100644 --- a/pkg/security/secl/compiler/eval/context.go +++ b/pkg/security/secl/compiler/eval/context.go @@ -36,7 +36,7 @@ type Context struct { now time.Time - AncestorsCounters map[string]int + IteratorCountCache map[string]int resolvedFields []string @@ -68,7 +68,7 @@ func (c *Context) Reset() { clear(c.BoolCache) clear(c.Registers) clear(c.RegisterCache) - clear(c.AncestorsCounters) + clear(c.IteratorCountCache) c.resolvedFields = nil } @@ -80,14 +80,14 @@ func (c *Context) GetResolvedFields() []string { // NewContext return a new Context func NewContext(evt Event) *Context { return &Context{ - Event: evt, - StringCache: make(map[string][]string), - IPNetCache: make(map[string][]net.IPNet), - IntCache: make(map[string][]int), - BoolCache: make(map[string][]bool), - Registers: make(map[RegisterID]int), - RegisterCache: make(map[RegisterID]*RegisterCacheEntry), - AncestorsCounters: make(map[string]int), + Event: evt, + StringCache: make(map[string][]string), + IPNetCache: make(map[string][]net.IPNet), + IntCache: make(map[string][]int), + BoolCache: make(map[string][]bool), + Registers: make(map[RegisterID]int), + RegisterCache: make(map[RegisterID]*RegisterCacheEntry), + IteratorCountCache: make(map[string]int), } } diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go index 8970d8871b2fd4..f880de9310bb2e 100644 --- a/pkg/security/secl/model/accessors_unix.go +++ b/pkg/security/secl/model/accessors_unix.go @@ -4847,7 +4847,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IPNetCache[field]; ok { return result } - 
results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) net.IPNet { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) net.IPNet { return current.Destination.IPNet }) ctx.IPNetCache[field] = results @@ -4873,7 +4873,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *Flow) bool { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, ev, func(ev *Event, current *Flow) bool { return ev.FieldHandlers.ResolveIsIPPublic(ev, ¤t.Destination) }) ctx.BoolCache[field] = results @@ -4898,7 +4898,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Destination.Port) }) ctx.IntCache[field] = results @@ -4923,7 +4923,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Egress.DataSize) }) ctx.IntCache[field] = results @@ -4948,7 +4948,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", 
ctx, nil, func(ev *Event, current *Flow) int { return int(current.Egress.PacketCount) }) ctx.IntCache[field] = results @@ -4973,7 +4973,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Ingress.DataSize) }) ctx.IntCache[field] = results @@ -4998,7 +4998,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Ingress.PacketCount) }) ctx.IntCache[field] = results @@ -5023,7 +5023,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.L3Protocol) }) ctx.IntCache[field] = results @@ -5048,7 +5048,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.L4Protocol) }) ctx.IntCache[field] = results @@ -5083,7 +5083,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval if result, ok := ctx.IPNetCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) net.IPNet { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) net.IPNet { return current.Source.IPNet }) ctx.IPNetCache[field] = results @@ -5109,7 +5109,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *Flow) bool { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, ev, func(ev *Event, current *Flow) bool { return ev.FieldHandlers.ResolveIsIPPublic(ev, ¤t.Source) }) ctx.BoolCache[field] = results @@ -5134,7 +5134,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Source.Port) }) ctx.IntCache[field] = results @@ -5644,7 +5644,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5669,7 +5669,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := 
newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5694,7 +5694,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5719,7 +5719,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -5744,7 +5744,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = 
results @@ -5769,7 +5769,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5793,7 +5793,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results @@ -5817,7 +5817,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results @@ -5841,7 +5841,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return 
int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results @@ -5865,7 +5865,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results @@ -5889,7 +5889,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results @@ -5914,7 +5914,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -5939,7 +5939,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -5964,7 +5964,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results @@ -5988,7 +5988,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results @@ -6013,7 +6013,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -6038,7 +6038,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -6062,7 +6062,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results @@ -6086,7 +6086,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results @@ -6111,7 +6111,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -6136,7 +6136,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if 
result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -6161,7 +6161,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -6185,7 +6185,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results @@ -6209,7 +6209,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results @@ 
-6237,7 +6237,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6270,7 +6270,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6302,7 +6302,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6335,7 +6335,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if 
!current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6368,7 +6368,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -6401,7 +6401,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -6433,7 +6433,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6465,7 +6465,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := 
newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6497,7 +6497,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6529,7 +6529,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6563,7 +6563,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6593,7 +6593,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := 
newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -6622,7 +6622,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6655,7 +6655,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6688,7 +6688,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6722,7 +6722,7 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6752,7 +6752,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -6781,7 +6781,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6813,7 +6813,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if 
!current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6846,7 +6846,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6874,7 +6874,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results @@ -6898,7 +6898,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results @@ -6922,7 +6922,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results @@ -6946,7 +6946,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results @@ -6970,7 +6970,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results @@ -6994,7 +6994,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results @@ -7022,7 +7022,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int 
{ + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7055,7 +7055,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7087,7 +7087,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7120,7 +7120,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7153,7 +7153,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { 
return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -7186,7 +7186,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -7218,7 +7218,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7250,7 +7250,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7282,7 +7282,7 
@@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7314,7 +7314,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7348,7 +7348,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7378,7 +7378,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return 
len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -7407,7 +7407,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7440,7 +7440,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7473,7 +7473,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7507,7 +7507,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) 
string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7537,7 +7537,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -7566,7 +7566,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7598,7 +7598,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7631,7 +7631,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; 
ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7659,7 +7659,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results @@ -7683,7 +7683,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results @@ -7708,7 +7708,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessIsThread(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -7742,7 +7742,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -7766,7 +7766,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -7790,7 +7790,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results @@ -7814,7 +7814,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results @@ -7838,7 +7838,7 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results @@ -7862,7 +7862,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results @@ -7887,7 +7887,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveK8SGroups(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -7912,7 +7912,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUID(ev, 
¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -7937,7 +7937,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUsername(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -10238,7 +10238,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10263,7 +10263,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10288,7 +10288,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := 
newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10313,7 +10313,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -10338,7 +10338,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10363,7 +10363,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10387,7 +10387,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results 
:= newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results @@ -10411,7 +10411,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results @@ -10435,7 +10435,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results @@ -10459,7 +10459,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results @@ -10483,7 +10483,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return 
result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results @@ -10508,7 +10508,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -10533,7 +10533,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -10558,7 +10558,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results @@ -10582,7 +10582,7 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results @@ -10607,7 +10607,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10632,7 +10632,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -10656,7 +10656,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results @@ -10680,7 
+10680,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results @@ -10705,7 +10705,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10730,7 +10730,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10755,7 +10755,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return 
ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -10779,7 +10779,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results @@ -10803,7 +10803,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results @@ -10831,7 +10831,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -10864,7 +10864,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev 
*Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -10896,7 +10896,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -10929,7 +10929,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -10962,7 +10962,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -10995,7 +10995,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) 
bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -11027,7 +11027,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11059,7 +11059,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11091,7 +11091,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11123,7 +11123,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11157,7 +11157,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11187,7 +11187,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -11216,7 +11216,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11249,7 +11249,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := 
ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11282,7 +11282,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11316,7 +11316,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11346,7 +11346,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -11375,7 +11375,7 @@ func (_ 
*Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11407,7 +11407,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11440,7 +11440,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11468,7 +11468,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSGID) }) 
ctx.IntCache[field] = results @@ -11492,7 +11492,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results @@ -11516,7 +11516,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results @@ -11540,7 +11540,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results @@ -11564,7 +11564,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return 
int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results @@ -11588,7 +11588,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results @@ -11616,7 +11616,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11649,7 +11649,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11681,7 +11681,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev 
*Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11714,7 +11714,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11747,7 +11747,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -11780,7 +11780,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -11812,7 +11812,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current 
*ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11844,7 +11844,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11876,7 +11876,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11908,7 +11908,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11942,7 +11942,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := 
newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11972,7 +11972,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -12001,7 +12001,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12034,7 +12034,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12067,7 +12067,7 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12101,7 +12101,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12131,7 +12131,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -12160,7 +12160,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = 
&eval.ErrNotSupported{Field: field} return 0 @@ -12192,7 +12192,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -12225,7 +12225,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12253,7 +12253,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results @@ -12277,7 +12277,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return 
current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results @@ -12302,7 +12302,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessIsThread(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -12336,7 +12336,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -12360,7 +12360,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -12384,7 +12384,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return 
int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results @@ -12408,7 +12408,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results @@ -12432,7 +12432,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results @@ -12456,7 +12456,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results @@ -12481,7 +12481,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) 
[]string { return ev.FieldHandlers.ResolveK8SGroups(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -12506,7 +12506,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUID(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -12531,7 +12531,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUsername(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -16122,7 +16122,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16147,7 +16147,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) 
[]string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16172,7 +16172,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16197,7 +16197,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -16222,7 +16222,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16247,7 +16247,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := 
ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16271,7 +16271,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results @@ -16295,7 +16295,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results @@ -16319,7 +16319,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results @@ -16343,7 +16343,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results @@ -16367,7 +16367,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results @@ -16392,7 +16392,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -16417,7 +16417,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ 
-16442,7 +16442,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results @@ -16466,7 +16466,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results @@ -16491,7 +16491,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16516,7 +16516,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return 
int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -16540,7 +16540,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results @@ -16564,7 +16564,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results @@ -16589,7 +16589,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16614,7 +16614,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, 
"Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16639,7 +16639,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -16663,7 +16663,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results @@ -16687,7 +16687,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results @@ -16715,7 +16715,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int 
{ + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16748,7 +16748,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -16780,7 +16780,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16813,7 +16813,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -16846,7 +16846,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := 
newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -16879,7 +16879,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -16911,7 +16911,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16943,7 +16943,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16975,7 +16975,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17007,7 +17007,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17041,7 +17041,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17071,7 +17071,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -17100,7 
+17100,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17133,7 +17133,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17166,7 +17166,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17200,7 +17200,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if 
!current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17230,7 +17230,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -17259,7 +17259,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17291,7 +17291,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17324,7 +17324,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, 
"Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17352,7 +17352,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results @@ -17376,7 +17376,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results @@ -17400,7 +17400,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results @@ -17424,7 +17424,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := 
newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results @@ -17448,7 +17448,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results @@ -17472,7 +17472,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results @@ -17500,7 +17500,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17533,7 +17533,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current 
*ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17565,7 +17565,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17598,7 +17598,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17631,7 +17631,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -17664,7 +17664,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok 
{ return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -17696,7 +17696,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17728,7 +17728,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17760,7 +17760,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17792,7 +17792,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17826,7 +17826,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17856,7 +17856,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -17885,7 +17885,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" @@ -17918,7 +17918,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17951,7 +17951,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17985,7 +17985,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -18015,7 +18015,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current 
*ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -18044,7 +18044,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -18076,7 +18076,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -18109,7 +18109,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -18137,7 +18137,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results 
:= newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results @@ -18161,7 +18161,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results @@ -18186,7 +18186,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessIsThread(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -18220,7 +18220,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -18244,7 +18244,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results 
:= newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -18268,7 +18268,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results @@ -18292,7 +18292,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results @@ -18316,7 +18316,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results @@ -18340,7 +18340,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := 
newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results @@ -18365,7 +18365,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveK8SGroups(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -18390,7 +18390,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUID(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -18415,7 +18415,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUsername(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results diff --git a/pkg/security/secl/model/accessors_windows.go b/pkg/security/secl/model/accessors_windows.go index 5c07db9be757a4..eb63650b480ebb 100644 --- 
a/pkg/security/secl/model/accessors_windows.go +++ b/pkg/security/secl/model/accessors_windows.go @@ -884,7 +884,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessCmdLine(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -908,7 +908,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.ContainerID }) ctx.StringCache[field] = results @@ -933,7 +933,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -958,7 +958,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -983,7 +983,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1009,7 +1009,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1035,7 +1035,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1061,7 +1061,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - 
results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1087,7 +1087,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1121,7 +1121,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -1145,7 +1145,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -1170,7 +1170,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveUser(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1194,7 +1194,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.OwnerSidString }) ctx.StringCache[field] = results diff --git a/pkg/security/secl/model/string_array_iter.go b/pkg/security/secl/model/string_array_iter.go index 10034151c85d8d..b2bfc5ae9d99d9 100644 --- a/pkg/security/secl/model/string_array_iter.go +++ b/pkg/security/secl/model/string_array_iter.go @@ -23,23 +23,23 @@ func isNil[V comparable](v V) bool { } func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { - results := make([]T, 0, ctx.AncestorsCounters[field]) + results := make([]T, 0, ctx.IteratorCountCache[field]) for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { results = append(results, perIter(ev, entry)) } - ctx.AncestorsCounters[field] = len(results) + ctx.IteratorCountCache[field] = len(results) return results } func newAncestorsIteratorArray[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { - results := make([]T, 0, ctx.AncestorsCounters[field]) - 
ancestorsCount := 0 + results := make([]T, 0, ctx.IteratorCountCache[field]) + count := 0 for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { results = append(results, perIter(ev, entry)...) - ancestorsCount++ + count++ } - ctx.AncestorsCounters[field] = ancestorsCount + ctx.IteratorCountCache[field] = count return results } diff --git a/pkg/security/secl/rules/eval_test.go b/pkg/security/secl/rules/eval_test.go new file mode 100644 index 00000000000000..f46863d15fed26 --- /dev/null +++ b/pkg/security/secl/rules/eval_test.go @@ -0,0 +1,65 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package rules holds rules related files +package rules + +import ( + "testing" + + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" +) + +func TestIteratorCache(t *testing.T) { + event := model.NewFakeEvent() + + event.Exec = model.ExecEvent{ + Process: &model.Process{ + FileEvent: model.FileEvent{ + FileFields: model.FileFields{ + UID: 22, + }, + }, + }, + } + event.ProcessContext = &model.ProcessContext{ + Ancestor: &model.ProcessCacheEntry{ + ProcessContext: model.ProcessContext{ + Process: model.Process{ + PIDContext: model.PIDContext{ + Pid: 111, + }, + PPid: 111, + }, + }, + }, + } + + evalRule, err := eval.NewRule("test", `exec.file.uid == 22 && process.ancestors.pid == 111 && process.ancestors.ppid == 111`, ast.NewParsingContext(false), &eval.Opts{}) + if err != nil { + t.Error(err) + } + + rule := &Rule{ + Rule: evalRule, + } + + err = rule.GenEvaluator(&model.Model{}) + if err != nil { + t.Error(err) + } + + ctx := eval.NewContext(event) + + rule.Eval(ctx) + + if 
len(ctx.IteratorCountCache) != 1 || ctx.IteratorCountCache["BaseEvent.ProcessContext.Ancestor"] != 1 { + t.Errorf("wrong iterator cache entries: %+v", ctx.IteratorCountCache) + } +} diff --git a/pkg/security/seclwin/model/accessors_win.go b/pkg/security/seclwin/model/accessors_win.go index 4a5e19a7e0a9c7..38084deace9826 100644 --- a/pkg/security/seclwin/model/accessors_win.go +++ b/pkg/security/seclwin/model/accessors_win.go @@ -882,7 +882,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessCmdLine(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -906,7 +906,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.ContainerID }) ctx.StringCache[field] = results @@ -931,7 +931,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ 
-956,7 +956,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -981,7 +981,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1007,7 +1007,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1033,7 +1033,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev 
*Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1059,7 +1059,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1085,7 +1085,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1119,7 +1119,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -1143,7 +1143,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current 
*ProcessCacheEntry) int { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -1168,7 +1168,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveUser(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1192,7 +1192,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.OwnerSidString }) ctx.StringCache[field] = results diff --git a/pkg/security/seclwin/model/string_array_iter.go b/pkg/security/seclwin/model/string_array_iter.go index 10034151c85d8d..b2bfc5ae9d99d9 100644 --- a/pkg/security/seclwin/model/string_array_iter.go +++ b/pkg/security/seclwin/model/string_array_iter.go @@ -23,23 +23,23 @@ func isNil[V comparable](v V) bool { } func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { - results := make([]T, 0, ctx.AncestorsCounters[field]) + results := make([]T, 0, ctx.IteratorCountCache[field]) for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { results = append(results, perIter(ev, 
entry)) } - ctx.AncestorsCounters[field] = len(results) + ctx.IteratorCountCache[field] = len(results) return results } func newAncestorsIteratorArray[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { - results := make([]T, 0, ctx.AncestorsCounters[field]) - ancestorsCount := 0 + results := make([]T, 0, ctx.IteratorCountCache[field]) + count := 0 for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { results = append(results, perIter(ev, entry)...) - ancestorsCount++ + count++ } - ctx.AncestorsCounters[field] = ancestorsCount + ctx.IteratorCountCache[field] = count return results } From 89a88eafc2086eb2045b8968b984e73996620b81 Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 10:11:52 +0000 Subject: [PATCH 05/97] [test-infra-definitions][automated] Bump test-infra-definitions to f92dca10d03c3093b52db60b72d951dfaffe360d (#33421) Co-authored-by: agent-platform-auto-pr[bot] <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 87aa0a1aa94aed..73c5713eca5574 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -5,5 +5,5 @@ --- variables: - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 9e836ad2dc14 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: f92dca10d03c TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: '' diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 666ef068d487cd..3ec32b911c0488 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -60,7 +60,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: 
github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20250123105416-9e836ad2dc14 + github.com/DataDog/test-infra-definitions v0.0.0-20250127165314-f92dca10d03c github.com/aws/aws-sdk-go-v2 v1.33.0 github.com/aws/aws-sdk-go-v2/config v1.29.1 github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 135c384226bc65..ff8cd7edab5a74 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -14,8 +14,8 @@ github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEU github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20250123105416-9e836ad2dc14 h1:PyX6uqsTA+UaBF8+OS9t9unAdezSZWY3MkF4fksd7fc= -github.com/DataDog/test-infra-definitions v0.0.0-20250123105416-9e836ad2dc14/go.mod h1:l+XwYwMuZpin+jhQQZAumCaeqZAz/6b7CJ+8bBmWXYU= +github.com/DataDog/test-infra-definitions v0.0.0-20250127165314-f92dca10d03c h1:MVOExmOCIoqp8Jte+Xs+R75W6Qfyc+AzbPw9YYBUd78= +github.com/DataDog/test-infra-definitions v0.0.0-20250127165314-f92dca10d03c/go.mod h1:l+XwYwMuZpin+jhQQZAumCaeqZAz/6b7CJ+8bBmWXYU= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= From 6d518270dfec00a354161a06f6c0cb17d118e0f0 Mon Sep 17 00:00:00 2001 From: Guillaume Fournier <36961134+Gui774ume@users.noreply.github.com> Date: Tue, 28 Jan 2025 11:42:39 +0100 Subject: [PATCH 06/97] [CWS] Refactor 
NewEBPFProbe (#33251) --- pkg/security/ebpf/kernel/kernel.go | 1 + pkg/security/module/server_linux.go | 2 +- pkg/security/probe/probe_ebpf.go | 338 ++++++++++++++++------------ pkg/security/probe/probe_monitor.go | 2 +- 4 files changed, 195 insertions(+), 148 deletions(-) diff --git a/pkg/security/ebpf/kernel/kernel.go b/pkg/security/ebpf/kernel/kernel.go index 25e47574d1da04..beac84cf75aa68 100644 --- a/pkg/security/ebpf/kernel/kernel.go +++ b/pkg/security/ebpf/kernel/kernel.go @@ -326,6 +326,7 @@ func (k *Version) HaveMmapableMaps() bool { } // HaveRingBuffers returns whether the kernel supports ring buffer. +// https://github.com/torvalds/linux/commit/457f44363a8894135c85b7a9afd2bd8196db24ab func (k *Version) HaveRingBuffers() bool { return features.HaveMapType(ebpf.RingBuf) == nil } diff --git a/pkg/security/module/server_linux.go b/pkg/security/module/server_linux.go index 9e096cd8e46561..14e02c181f4399 100644 --- a/pkg/security/module/server_linux.go +++ b/pkg/security/module/server_linux.go @@ -217,7 +217,7 @@ func (a *APIServer) GetStatus(_ context.Context, _ *api.GetStatusParams) (*api.S }, KernelLockdown: string(kernel.GetLockdownMode()), UseMmapableMaps: p.GetKernelVersion().HaveMmapableMaps(), - UseRingBuffer: p.UseRingBuffers(), + UseRingBuffer: p.GetUseRingBuffers(), UseFentry: p.GetUseFentry(), } diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 66565660371e9d..cd91c574b88531 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -153,6 +153,8 @@ type EBPFProbe struct { runtimeCompiled bool useSyscallWrapper bool useFentry bool + useRingBuffers bool + useMmapableMaps bool // On demand onDemandManager *OnDemandProbesManager @@ -166,6 +168,11 @@ type EBPFProbe struct { playSnapShotState *atomic.Bool } +// GetUseRingBuffers returns p.useRingBuffers +func (p *EBPFProbe) GetUseRingBuffers() bool { + return p.useRingBuffers +} + // GetProfileManager returns the Profile Managers func (p 
*EBPFProbe) GetProfileManager() interface{} { return p.profileManagers @@ -185,9 +192,20 @@ func (p *EBPFProbe) GetKernelVersion() *kernel.Version { return p.kernelVersion } -// UseRingBuffers returns true if eBPF ring buffers are supported and used -func (p *EBPFProbe) UseRingBuffers() bool { - return p.config.Probe.EventStreamUseRingBuffer && p.kernelVersion.HaveRingBuffers() +// selectRingBuffersMode initializes p.useRingBuffers +func (p *EBPFProbe) selectRingBuffersMode() { + if !p.config.Probe.EventStreamUseRingBuffer { + p.useRingBuffers = false + return + } + + if !p.kernelVersion.HaveRingBuffers() { + p.useRingBuffers = false + seclog.Warnf("ringbuffers enabled but not supported on this kernel version, falling back to perf event") + return + } + + p.useRingBuffers = true } // GetUseFentry returns true if fentry is used @@ -346,7 +364,7 @@ func (p *EBPFProbe) Init() error { } p.useSyscallWrapper = useSyscallWrapper - loader := ebpf.NewProbeLoader(p.config.Probe, p.useSyscallWrapper, p.UseRingBuffers(), p.useFentry, p.statsdClient) + loader := ebpf.NewProbeLoader(p.config.Probe, p.useSyscallWrapper, p.useRingBuffers, p.useFentry, p.statsdClient) defer loader.Close() bytecodeReader, runtimeCompiled, err := loader.Load() @@ -1919,114 +1937,25 @@ func (p *EBPFProbe) EnableEnforcement(state bool) { p.processKiller.SetState(state) } -// NewEBPFProbe instantiates a new runtime security agent probe -func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, error) { - nerpc, err := erpc.NewERPC() - if err != nil { - return nil, err - } - - onDemandRate := rate.Limit(math.Inf(1)) - if config.RuntimeSecurity.OnDemandRateLimiterEnabled { - onDemandRate = MaxOnDemandEventsPerSecond - } - - processKiller, err := NewProcessKiller(config) - if err != nil { - return nil, err - } - - ctx, cancelFnc := context.WithCancel(context.Background()) - - p := &EBPFProbe{ - probe: probe, - config: config, - opts: opts, - statsdClient: opts.StatsdClient, - 
discarderRateLimiter: rate.NewLimiter(rate.Every(time.Second/5), 100), - kfilters: make(map[eval.EventType]kfilters.ActiveKFilters), - managerOptions: ebpf.NewDefaultOptions(), - Erpc: nerpc, - erpcRequest: erpc.NewERPCRequest(0), - isRuntimeDiscarded: !probe.Opts.DontDiscardRuntime, - ctx: ctx, - cancelFnc: cancelFnc, - newTCNetDevices: make(chan model.NetDevice, 16), - processKiller: processKiller, - onDemandRateLimiter: rate.NewLimiter(onDemandRate, 1), - playSnapShotState: atomic.NewBool(false), - } - - if err := p.detectKernelVersion(); err != nil { - // we need the kernel version to start, fail if we can't get it - return nil, err - } - - if err := p.sanityChecks(); err != nil { - return nil, err - } - - if err := p.VerifyOSVersion(); err != nil { - seclog.Warnf("the current kernel isn't officially supported, some features might not work properly: %v", err) - } - - if err := p.VerifyEnvironment(); err != nil { - seclog.Warnf("the current environment may be misconfigured: %v", err) - } - - p.selectFentryMode() - - useRingBuffers := p.UseRingBuffers() - useMmapableMaps := p.kernelVersion.HaveMmapableMaps() - - p.Manager = ebpf.NewRuntimeSecurityManager(useRingBuffers, p.useFentry) - - p.supportsBPFSendSignal = p.kernelVersion.SupportBPFSendSignal() - - p.monitors = NewEBPFMonitors(p) - - p.numCPU, err = utils.NumCPU() - if err != nil { - return nil, fmt.Errorf("failed to parse CPU count: %w", err) - } - - p.managerOptions.MapSpecEditors = probes.AllMapSpecEditors(p.numCPU, probes.MapSpecEditorOpts{ - TracedCgroupSize: config.RuntimeSecurity.ActivityDumpTracedCgroupsCount, - UseRingBuffers: useRingBuffers, - UseMmapableMaps: useMmapableMaps, - RingBufferSize: uint32(config.Probe.EventStreamBufferSize), - PathResolutionEnabled: probe.Opts.PathResolutionEnabled, - SecurityProfileMaxCount: config.RuntimeSecurity.SecurityProfileMaxCount, - NetworkFlowMonitorEnabled: config.Probe.NetworkFlowMonitorEnabled, - }, p.kernelVersion) - - if 
config.RuntimeSecurity.ActivityDumpEnabled { - for _, e := range config.RuntimeSecurity.ActivityDumpTracedEventTypes { - if e == model.SyscallsEventType { - // Add syscall monitor probes - p.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SyscallMonitorSelectors...) - break - } - } - } - if config.RuntimeSecurity.AnomalyDetectionEnabled { - for _, e := range config.RuntimeSecurity.AnomalyDetectionEventTypes { - if e == model.SyscallsEventType { - // Add syscall monitor probes - p.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SyscallMonitorSelectors...) - break - } - } - } +// initManagerOptionsTailCalls initializes the eBPF manager tail calls +func (p *EBPFProbe) initManagerOptionsTailCalls(config *config.Config) { + p.managerOptions.TailCallRouter = probes.AllTailRoutes( + config.Probe.ERPCDentryResolutionEnabled, + config.Probe.NetworkEnabled, + config.Probe.NetworkFlowMonitorEnabled, + config.Probe.NetworkRawPacketEnabled, + p.useMmapableMaps, + ) +} - p.constantOffsets, err = p.GetOffsetConstants() - if err != nil { - seclog.Warnf("constant fetcher failed: %v", err) - return nil, err - } +// initManagerOptionsConstants initiatilizes the eBPF manager constants +func (p *EBPFProbe) initManagerOptionsConstants(probe *Probe, config *config.Config) { + areCGroupADsEnabled := config.RuntimeSecurity.ActivityDumpTracedCgroupsCount > 0 + // Add global constant editors p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, constantfetch.CreateConstantEditors(p.constantOffsets)...) - + p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, DiscarderConstants...) 
+ p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, getCGroupWriteConstants()) p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, manager.ConstantEditor{ Name: constantfetch.OffsetNameSchedProcessForkChildPid, @@ -2036,12 +1965,6 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e Name: constantfetch.OffsetNameSchedProcessForkParentPid, Value: constantfetch.ReadTracepointFieldOffsetWithFallback("sched/sched_process_fork", "parent_pid", 24), }, - ) - - areCGroupADsEnabled := config.RuntimeSecurity.ActivityDumpTracedCgroupsCount > 0 - - // Add global constant editors - p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, manager.ConstantEditor{ Name: "runtime_pid", Value: uint64(utils.Getpid()), @@ -2134,15 +2057,16 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e Name: "imds_ip", Value: uint64(config.RuntimeSecurity.IMDSIPv4), }, - ) - - p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, DiscarderConstants...) 
- p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, getCGroupWriteConstants()) - - p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, manager.ConstantEditor{ Name: "use_ring_buffer", - Value: utils.BoolTouint64(useRingBuffers), + Value: utils.BoolTouint64(p.useRingBuffers), + }, + manager.ConstantEditor{ + Name: "fentry_func_argc", + ValueCallback: func(prog *lib.ProgramSpec) interface{} { + // use a separate function to make sure we always return a uint64 + return getFuncArgCount(prog) + }, }, ) @@ -2163,33 +2087,47 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e }, ) } +} - p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, - manager.ConstantEditor{ - Name: "fentry_func_argc", - ValueCallback: func(prog *lib.ProgramSpec) interface{} { - // use a separate function to make sure we always return a uint64 - return getFuncArgCount(prog) - }, - }, - ) +// initManagerOptionsMaps initializes the eBPF manager map spec editors and map reader startup +func (p *EBPFProbe) initManagerOptionsMapSpecEditors(probe *Probe, config *config.Config) { + p.managerOptions.MapSpecEditors = probes.AllMapSpecEditors(p.numCPU, probes.MapSpecEditorOpts{ + TracedCgroupSize: config.RuntimeSecurity.ActivityDumpTracedCgroupsCount, + UseRingBuffers: p.useRingBuffers, + UseMmapableMaps: p.useMmapableMaps, + RingBufferSize: uint32(config.Probe.EventStreamBufferSize), + PathResolutionEnabled: probe.Opts.PathResolutionEnabled, + SecurityProfileMaxCount: config.RuntimeSecurity.SecurityProfileMaxCount, + NetworkFlowMonitorEnabled: config.Probe.NetworkFlowMonitorEnabled, + }, p.kernelVersion) + + if p.useRingBuffers { + p.managerOptions.SkipRingbufferReaderStartup = map[string]bool{ + eventstream.EventStreamMap: true, + } + } else { + p.managerOptions.SkipPerfMapReaderStartup = map[string]bool{ + eventstream.EventStreamMap: true, + } + } +} - // tail calls - p.managerOptions.TailCallRouter = 
probes.AllTailRoutes(config.Probe.ERPCDentryResolutionEnabled, config.Probe.NetworkEnabled, config.Probe.NetworkFlowMonitorEnabled, config.Probe.NetworkRawPacketEnabled, useMmapableMaps) - if !config.Probe.ERPCDentryResolutionEnabled || useMmapableMaps { +// initManagerOptionsExcludedFunctions initializes the excluded functions of the eBPF manager +func (p *EBPFProbe) initManagerOptionsExcludedFunctions(config *config.Config) error { + if !config.Probe.ERPCDentryResolutionEnabled || p.useMmapableMaps { // exclude the programs that use the bpf_probe_write_user helper p.managerOptions.ExcludedFunctions = probes.AllBPFProbeWriteUserProgramFunctions() } // prevent some TC classifiers from loading - if !p.config.Probe.NetworkEnabled { + if !config.Probe.NetworkEnabled { p.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.GetAllTCProgramFunctions()...) - } else if !p.config.Probe.NetworkRawPacketEnabled { + } else if !config.Probe.NetworkRawPacketEnabled { p.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.GetRawPacketTCProgramFunctions()...) } // prevent some tal calls from loading - if !p.config.Probe.NetworkFlowMonitorEnabled { + if !config.Probe.NetworkFlowMonitorEnabled { p.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.GetAllFlushNetworkStatsTaillCallFunctions()...) 
} @@ -2201,17 +2139,131 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e if p.useFentry { afBasedExcluder, err := newAvailableFunctionsBasedExcluder() if err != nil { - return nil, err + return err } p.managerOptions.AdditionalExcludedFunctionCollector = afBasedExcluder } + return nil +} + +// initManagerOptionsActivatedProbes initializes the eBPF manager activated probes options +func (p *EBPFProbe) initManagerOptionsActivatedProbes(config *config.Config) { + if config.RuntimeSecurity.ActivityDumpEnabled { + for _, e := range config.RuntimeSecurity.ActivityDumpTracedEventTypes { + if e == model.SyscallsEventType { + // Add syscall monitor probes + p.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SyscallMonitorSelectors...) + break + } + } + } + if config.RuntimeSecurity.AnomalyDetectionEnabled { + for _, e := range config.RuntimeSecurity.AnomalyDetectionEventTypes { + if e == model.SyscallsEventType { + // Add syscall monitor probes + p.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SyscallMonitorSelectors...) 
+ break + } + } + } +} + +// initManagerOptions initializes the eBPF manager options +func (p *EBPFProbe) initManagerOptions(probe *Probe, config *config.Config) error { + p.initManagerOptionsActivatedProbes(config) + p.initManagerOptionsConstants(probe, config) + p.initManagerOptionsTailCalls(config) + p.initManagerOptionsMapSpecEditors(probe, config) + return p.initManagerOptionsExcludedFunctions(config) +} + +// NewEBPFProbe instantiates a new runtime security agent probe +func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, error) { + nerpc, err := erpc.NewERPC() + if err != nil { + return nil, err + } + + onDemandRate := rate.Limit(math.Inf(1)) + if config.RuntimeSecurity.OnDemandRateLimiterEnabled { + onDemandRate = MaxOnDemandEventsPerSecond + } + + processKiller, err := NewProcessKiller(config) + if err != nil { + return nil, err + } + + ctx, cancelFnc := context.WithCancel(context.Background()) + + p := &EBPFProbe{ + probe: probe, + config: config, + opts: opts, + statsdClient: opts.StatsdClient, + discarderRateLimiter: rate.NewLimiter(rate.Every(time.Second/5), 100), + kfilters: make(map[eval.EventType]kfilters.ActiveKFilters), + managerOptions: ebpf.NewDefaultOptions(), + Erpc: nerpc, + erpcRequest: erpc.NewERPCRequest(0), + isRuntimeDiscarded: !probe.Opts.DontDiscardRuntime, + ctx: ctx, + cancelFnc: cancelFnc, + newTCNetDevices: make(chan model.NetDevice, 16), + processKiller: processKiller, + onDemandRateLimiter: rate.NewLimiter(onDemandRate, 1), + playSnapShotState: atomic.NewBool(false), + } + + if err := p.detectKernelVersion(); err != nil { + // we need the kernel version to start, fail if we can't get it + return nil, err + } + + if err := p.sanityChecks(); err != nil { + return nil, err + } + + if err := p.VerifyOSVersion(); err != nil { + seclog.Warnf("the current kernel isn't officially supported, some features might not work properly: %v", err) + } + + if err := p.VerifyEnvironment(); err != nil { + seclog.Warnf("the 
current environment may be misconfigured: %v", err) + } + + p.selectFentryMode() + p.selectRingBuffersMode() + p.useMmapableMaps = p.kernelVersion.HaveMmapableMaps() + + p.Manager = ebpf.NewRuntimeSecurityManager(p.useRingBuffers, p.useFentry) + + p.supportsBPFSendSignal = p.kernelVersion.SupportBPFSendSignal() + + p.monitors = NewEBPFMonitors(p) + + p.numCPU, err = utils.NumCPU() + if err != nil { + return nil, fmt.Errorf("failed to parse CPU count: %w", err) + } + + p.constantOffsets, err = p.GetOffsetConstants() + if err != nil { + seclog.Warnf("constant fetcher failed: %v", err) + return nil, err + } + + if err := p.initManagerOptions(probe, config); err != nil { + seclog.Warnf("managerOptions init failed: %v", err) + return nil, err + } resolversOpts := resolvers.Opts{ PathResolutionEnabled: probe.Opts.PathResolutionEnabled, EnvVarsResolutionEnabled: probe.Opts.EnvsVarResolutionEnabled, Tagger: probe.Opts.Tagger, - UseRingBuffer: useRingBuffers, + UseRingBuffer: p.useRingBuffers, TTYFallbackEnabled: probe.Opts.TTYFallbackEnabled, } @@ -2244,19 +2296,13 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e return newEBPFEvent(p.fieldHandlers) }) - if useRingBuffers { + if p.useRingBuffers { p.eventStream = ringbuffer.New(p.handleEvent) - p.managerOptions.SkipRingbufferReaderStartup = map[string]bool{ - eventstream.EventStreamMap: true, - } } else { p.eventStream, err = reorderer.NewOrderedPerfMap(p.ctx, p.handleEvent, probe.StatsdClient) if err != nil { return nil, err } - p.managerOptions.SkipPerfMapReaderStartup = map[string]bool{ - eventstream.EventStreamMap: true, - } } p.event = p.NewEvent() diff --git a/pkg/security/probe/probe_monitor.go b/pkg/security/probe/probe_monitor.go index 420ac6241d3fb1..8324b852055236 100644 --- a/pkg/security/probe/probe_monitor.go +++ b/pkg/security/probe/probe_monitor.go @@ -48,7 +48,7 @@ func (m *EBPFMonitors) Init() error { p := m.ebpfProbe // instantiate a new event statistics monitor - 
m.eventStreamMonitor, err = eventstream.NewEventStreamMonitor(p.config.Probe, p.Erpc, p.Manager, p.statsdClient, p.onEventLost, p.UseRingBuffers()) + m.eventStreamMonitor, err = eventstream.NewEventStreamMonitor(p.config.Probe, p.Erpc, p.Manager, p.statsdClient, p.onEventLost, p.useRingBuffers) if err != nil { return fmt.Errorf("couldn't create the events statistics monitor: %w", err) } From 053208a1cc15b89100b92eaed90fef942685116d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Tue, 28 Jan 2025 05:57:23 -0500 Subject: [PATCH 07/97] [ADXT-861] Add flake.MarkOnLog (#33246) --- pkg/util/testutil/flake/flake.go | 105 ++++++++++++++++++++++++ tasks/gotest.py | 9 +- tasks/new_e2e_tests.py | 20 ++++- tasks/testwasher.py | 64 ++++++++------- tasks/unit_tests/e2e_testing_tests.py | 4 +- tasks/unit_tests/testdata/flakes_a.yaml | 1 + tasks/unit_tests/testdata/flakes_b.yaml | 1 + tasks/unit_tests/testwasher_tests.py | 43 +++++++--- 8 files changed, 198 insertions(+), 49 deletions(-) create mode 100644 tasks/unit_tests/testdata/flakes_a.yaml create mode 100644 tasks/unit_tests/testdata/flakes_b.yaml diff --git a/pkg/util/testutil/flake/flake.go b/pkg/util/testutil/flake/flake.go index c6946a688d4e33..a11f37e470dfce 100644 --- a/pkg/util/testutil/flake/flake.go +++ b/pkg/util/testutil/flake/flake.go @@ -11,14 +11,22 @@ package flake import ( "flag" + "fmt" "os" + "path/filepath" + "runtime" "strconv" + "strings" + "sync" "testing" + + "gopkg.in/yaml.v3" ) const flakyTestMessage = "flakytest: this is a known flaky test" var skipFlake = flag.Bool("skip-flake", false, "skip tests labeled as flakes") +var flakyPatternsConfigMutex = sync.Mutex{} // Mark test as a known flaky. // If any of skip-flake flag or GO_TEST_SKIP_FLAKE environment variable is set, the test will be skipped. 
@@ -32,6 +40,103 @@ func Mark(t testing.TB) { t.Log(flakyTestMessage) } +// Get the test function package which is the topmost function in the stack that is part of the datadog-agent package +func getPackageName() (string, error) { + fullPackageName := "" + for i := 0; i < 42; i++ { + pc, _, _, ok := runtime.Caller(i) + if !ok { + // Top of the stack + break + } + fullname := runtime.FuncForPC(pc).Name() + if strings.Contains(fullname, "datadog-agent") { + fullPackageName = fullname + } + } + + if fullPackageName == "" { + return "", fmt.Errorf("failed to fetch e2e test function information") + } + + prefix := filepath.FromSlash("github.com/DataDog/datadog-agent/") + fullPackageName = strings.TrimPrefix(fullPackageName, prefix) + nameParts := strings.Split(fullPackageName, ".") + packageName := nameParts[0] + + return packageName, nil +} + +// MarkOnLog marks the test as flaky when the `pattern` regular expression is found in its logs. +func MarkOnLog(t testing.TB, pattern string) { + // Types for the yaml file + type testEntry struct { + Test string `yaml:"test"` + OnLog string `yaml:"on-log"` + } + type configEntries = map[string][]testEntry + + t.Helper() + flakyPatternsConfig := os.Getenv("E2E_FLAKY_PATTERNS_CONFIG") + if flakyPatternsConfig == "" { + t.Log("Warning: flake.MarkOnLog will not mark tests as flaky since E2E_FLAKY_PATTERNS_CONFIG is not set") + return + } + + // Avoid race conditions + flakyPatternsConfigMutex.Lock() + defer flakyPatternsConfigMutex.Unlock() + + flakyConfig := make(configEntries) + + // Read initial config + _, err := os.Stat(flakyPatternsConfig) + if err == nil { + f, err := os.Open(flakyPatternsConfig) + if err != nil { + t.Logf("Warning: failed to open flaky patterns config file: %v", err) + return + } + defer f.Close() + + dec := yaml.NewDecoder(f) + err = dec.Decode(&flakyConfig) + if err != nil { + t.Logf("Warning: failed to decode flaky patterns config file: %v", err) + return + } + } + + packageName, err := getPackageName() 
+ if err != nil { + t.Logf("Warning: failed to get package name: %v", err) + return + } + + // Update config by adding an entry to this test with this pattern + entry := testEntry{Test: t.Name(), OnLog: pattern} + if packageConfig, ok := flakyConfig[packageName]; ok { + flakyConfig[packageName] = append(packageConfig, entry) + } else { + flakyConfig[packageName] = []testEntry{entry} + } + + // Write config back + f, err := os.OpenFile(flakyPatternsConfig, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Logf("Warning: failed to open flaky patterns config file: %v", err) + return + } + defer f.Close() + + encoder := yaml.NewEncoder(f) + err = encoder.Encode(flakyConfig) + if err != nil { + t.Logf("Warning: failed to encode flaky patterns config file: %v", err) + return + } +} + func shouldSkipFlake() bool { if *skipFlake { return true diff --git a/tasks/gotest.py b/tasks/gotest.py index af889e8b6dcae0..1d331a3328d1fe 100644 --- a/tasks/gotest.py +++ b/tasks/gotest.py @@ -205,7 +205,9 @@ def sanitize_env_vars(): del os.environ[env] -def process_test_result(test_results, junit_tar: str, flavor: AgentFlavor, test_washer: bool) -> bool: +def process_test_result( + test_results, junit_tar: str, flavor: AgentFlavor, test_washer: bool, extra_flakes_config: str | None = None +) -> bool: if junit_tar: junit_files = [ module_test_result.junit_file_path @@ -225,7 +227,10 @@ def process_test_result(test_results, junit_tar: str, flavor: AgentFlavor, test_ if not test_washer: print("Test washer is always enabled in the CI, enforcing it") - tw = TestWasher() + flakes_configs = ["flakes.yaml"] + if extra_flakes_config is not None: + flakes_configs.append(extra_flakes_config) + tw = TestWasher(flakes_file_paths=flakes_configs) print( "Processing test results for known flakes. 
Learn more about flake marker and test washer at https://datadoghq.atlassian.net/wiki/spaces/ADX/pages/3405611398/Flaky+tests+in+go+introducing+flake.Mark" ) diff --git a/tasks/new_e2e_tests.py b/tasks/new_e2e_tests.py index 55dcf6224c27c6..46452ce557eedc 100644 --- a/tasks/new_e2e_tests.py +++ b/tasks/new_e2e_tests.py @@ -127,6 +127,13 @@ def run( if test_run_name != "": test_run_arg = f"-run {test_run_name}" + # Create temporary file for flaky patterns config + tmp_flaky_patterns_config = tempfile.NamedTemporaryFile(suffix="flaky_patterns_config.yaml", delete_on_close=False) + tmp_flaky_patterns_config.write(b"{}") + tmp_flaky_patterns_config.close() + flaky_patterns_config = tmp_flaky_patterns_config.name + env_vars["E2E_FLAKY_PATTERNS_CONFIG"] = flaky_patterns_config + cmd = f'gotestsum --format {gotestsum_format} ' scrubber_raw_command = "" # Scrub the test output to avoid leaking API or APP keys when running in the CI @@ -158,6 +165,7 @@ def run( "src_agent_version": f"-src-agent-version {src_agent_version}" if src_agent_version else '', "dest_agent_version": f"-dest-agent-version {dest_agent_version}" if dest_agent_version else '', "keep_stacks": '-keep-stacks' if keep_stacks else '', + "flaky_patterns_config": f'--flaky-patterns-config={flaky_patterns_config}' if flaky_patterns_config else '', "extra_flags": extra_flags, } @@ -174,7 +182,9 @@ def run( test_profiler=None, ) - success = process_test_result(test_res, junit_tar, AgentFlavor.base, test_washer) + success = process_test_result( + test_res, junit_tar, AgentFlavor.base, test_washer, extra_flakes_config=flaky_patterns_config + ) if running_in_ci(): # Do not print all the params, they could contain secrets needed only in the CI @@ -200,7 +210,9 @@ def run( os.makedirs(logs_folder, exist_ok=True) write_result_to_log_files(post_processed_output, logs_folder) - pretty_print_logs(test_res[0].result_json_path, post_processed_output) + pretty_print_logs( + test_res[0].result_json_path, 
post_processed_output, flakes_files=["flakes.yaml", flaky_patterns_config] + ) else: print( color_message("WARNING", "yellow") @@ -383,7 +395,7 @@ def pretty_print_test_logs(logs_per_test: list[tuple[str, str, str]], max_size): return size -def pretty_print_logs(result_json_path, logs_per_test, max_size=250000, flakes_file="flakes.yaml"): +def pretty_print_logs(result_json_path, logs_per_test, max_size=250000, flakes_files=None): """Pretty prints logs with a specific order. Print order: @@ -395,7 +407,7 @@ def pretty_print_logs(result_json_path, logs_per_test, max_size=250000, flakes_f result_json_name = result_json_path.split("/")[-1] result_json_dir = result_json_path.removesuffix('/' + result_json_name) - washer = TestWasher(test_output_json_file=result_json_name, flakes_file_path=flakes_file) + washer = TestWasher(test_output_json_file=result_json_name, flakes_file_paths=flakes_files or ["flakes.yaml"]) failing_tests, marked_flaky_tests = washer.parse_test_results(result_json_dir) all_known_flakes = washer.merge_known_flakes(marked_flaky_tests) diff --git a/tasks/testwasher.py b/tasks/testwasher.py index 853bb13da79947..67ed699f0f8bd3 100644 --- a/tasks/testwasher.py +++ b/tasks/testwasher.py @@ -24,18 +24,24 @@ def __init__( self, test_output_json_file="module_test_output.json", flaky_test_indicator=FLAKY_TEST_INDICATOR, - flakes_file_path="flakes.yaml", + flakes_file_paths: list[str] | None = None, ): + """Used to deduce which tests are flaky using the resulting test output and the flaky configurations. + + Args: + - flakes_file_paths: Paths to flake configuration files that will be merged. ["flakes.yaml"] by default + """ + self.test_output_json_file = test_output_json_file self.flaky_test_indicator = flaky_test_indicator - self.flakes_file_path = flakes_file_path + self.flakes_file_paths = flakes_file_paths or ["flakes.yaml"] self.known_flaky_tests = defaultdict(set) # flaky_log_patterns[package][test] = [pattern1, pattern2...] 
- self.flaky_log_patterns = defaultdict(dict) + self.flaky_log_patterns = defaultdict(lambda: defaultdict(list)) # Top level `on-log` used to have a pattern for every test self.flaky_log_main_patterns = [] - self.parse_flaky_file() + self.parse_flaky_files() def get_non_flaky_failing_tests(self, failing_tests: dict, flaky_marked_tests: dict): """ @@ -69,37 +75,39 @@ def merge_known_flakes(self, marked_flakes): known_flakes[package] = tests return known_flakes - def parse_flaky_file(self): + def parse_flaky_files(self): """ - Parse the flakes.yaml file and add the tests listed there to the kown flaky tests list + Parse the flakes.yaml like files and add the tests listed there to the known flaky tests list / the flaky log patterns to the flaky log patterns list """ reserved_keywords = ("on-log",) - with open(self.flakes_file_path) as f: - flakes = yaml.safe_load(f) + for path in self.flakes_file_paths: + with open(path) as f: + flakes = yaml.safe_load(f) - if not flakes: - return - - # Add the tests to the known flaky tests list - for package, tests in flakes.items(): - if package in reserved_keywords: + if not flakes: continue - for test in tests: - if 'on-log' in test: - patterns = test['on-log'] - if isinstance(patterns, str): - patterns = [patterns] - self.flaky_log_patterns[f"github.com/DataDog/datadog-agent/{package}"][test['test']] = patterns - else: - # If there is no `on-log`, we consider it as a known flaky test right away - self.known_flaky_tests[f"github.com/DataDog/datadog-agent/{package}"].add(test['test']) - - # on-log patterns at the top level - self.flaky_log_main_patterns = flakes.get('on-log', []) - if isinstance(self.flaky_log_main_patterns, str): - self.flaky_log_main_patterns = [self.flaky_log_main_patterns] + # Add the tests to the known flaky tests list + for package, tests in flakes.items(): + if package in reserved_keywords: + continue + + for test in tests: + if 'on-log' in test: + patterns = test['on-log'] + if isinstance(patterns, str): + 
patterns = [patterns] + self.flaky_log_patterns[f"github.com/DataDog/datadog-agent/{package}"][test['test']] += patterns + else: + # If there is no `on-log`, we consider it as a known flaky test right away + self.known_flaky_tests[f"github.com/DataDog/datadog-agent/{package}"].add(test['test']) + + # on-log patterns at the top level + main_patterns = flakes.get('on-log', []) + if isinstance(main_patterns, str): + main_patterns = [main_patterns] + self.flaky_log_main_patterns += main_patterns def parse_test_results(self, module_path: str) -> tuple[dict, dict]: failing_tests = defaultdict(set) diff --git a/tasks/unit_tests/e2e_testing_tests.py b/tasks/unit_tests/e2e_testing_tests.py index 565470fe08d5e2..258071bb5aac86 100644 --- a/tasks/unit_tests/e2e_testing_tests.py +++ b/tasks/unit_tests/e2e_testing_tests.py @@ -11,7 +11,7 @@ def test_pretty_print(self, p): flakes_file = "tasks/unit_tests/testdata/flakes_2.yaml" path = "tasks/unit_tests/testdata/test_output_failure_marker.json" - pretty_print_logs(path, post_process_output(path), flakes_file=flakes_file) + pretty_print_logs(path, post_process_output(path), flakes_files=[flakes_file]) # Failing / flaky, successful / non flaky self.assertEqual(p.call_count, 2) @@ -28,7 +28,7 @@ def test_pretty_print2(self, p=None): flakes_file = "tasks/unit_tests/testdata/flakes_1.yaml" path = "tasks/unit_tests/testdata/test_output_failure_no_marker.json" - pretty_print_logs(path, post_process_output(path), flakes_file=flakes_file) + pretty_print_logs(path, post_process_output(path), flakes_files=[flakes_file]) # Failing / flaky, successful / non flaky self.assertEqual(p.call_count, 2) diff --git a/tasks/unit_tests/testdata/flakes_a.yaml b/tasks/unit_tests/testdata/flakes_a.yaml new file mode 100644 index 00000000000000..6f83e9cd85f013 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_a.yaml @@ -0,0 +1 @@ +on-log: "PASS: TestFilterDev/ReplaceDevMountLength" diff --git a/tasks/unit_tests/testdata/flakes_b.yaml 
b/tasks/unit_tests/testdata/flakes_b.yaml new file mode 100644 index 00000000000000..64f97630fb31b6 --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_b.yaml @@ -0,0 +1 @@ +on-log: "PASS: TestAsJson" diff --git a/tasks/unit_tests/testwasher_tests.py b/tasks/unit_tests/testwasher_tests.py index 7811fa37dc8c18..bfe6a98df5b6ff 100644 --- a/tasks/unit_tests/testwasher_tests.py +++ b/tasks/unit_tests/testwasher_tests.py @@ -7,7 +7,7 @@ class TestUtils(unittest.TestCase): def test_flaky_marked_failing_test(self): test_washer_1 = TestWasher( test_output_json_file="test_output_failure_marker.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_2.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer_1.parse_test_results(module_path) @@ -19,7 +19,7 @@ def test_flaky_marked_failing_test(self): def test_flakes_file_failing_test(self): test_washer_2 = TestWasher( test_output_json_file="test_output_failure_no_marker.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_1.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_1.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer_2.parse_test_results(module_path) @@ -31,7 +31,7 @@ def test_flakes_file_failing_test(self): def test_should_fail_failing_tests(self): test_washer_3 = TestWasher( test_output_json_file="test_output_failure_no_marker.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_2.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer_3.parse_test_results(module_path) @@ -43,7 +43,7 @@ def test_should_fail_failing_tests(self): def test_should_mark_parent_flaky(self): test_washer = TestWasher( test_output_json_file="test_output_failure_parent.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + 
flakes_file_paths=["tasks/unit_tests/testdata/flakes_2.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -58,7 +58,7 @@ def test_should_mark_parent_flaky(self): def test_should_not_be_considered_flaky(self): test_washer = TestWasher( test_output_json_file="test_output_failure_only_parent.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_3.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_3.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -73,7 +73,7 @@ def test_should_not_be_considered_flaky(self): def test_flaky_panicking_test(self): test_washer = TestWasher( test_output_json_file="test_output_failure_flaky_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_2.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -85,7 +85,7 @@ def test_flaky_panicking_test(self): def test_non_flaky_panicking_test(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_2.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -100,7 +100,7 @@ def test_non_flaky_panicking_test(self): def test_flaky_panicking_flakesyaml_test(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_4.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_4.yaml"], ) module_path = "tasks/unit_tests/testdata" failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -112,7 +112,7 @@ def 
test_flaky_panicking_flakesyaml_test(self): def test_flaky_on_log(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_5.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_5.yaml"], ) module_path = "tasks/unit_tests/testdata" _, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -122,7 +122,7 @@ def test_flaky_on_log(self): def test_flaky_on_log2(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_6.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_6.yaml"], ) module_path = "tasks/unit_tests/testdata" _, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -135,7 +135,7 @@ def test_flaky_on_log2(self): def test_flaky_on_log3(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_7.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_7.yaml"], ) module_path = "tasks/unit_tests/testdata" _, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -148,7 +148,7 @@ def test_flaky_on_log3(self): def test_flaky_on_log4(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_8.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_8.yaml"], ) module_path = "tasks/unit_tests/testdata" _, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ -161,7 +161,7 @@ def test_flaky_on_log4(self): def test_flaky_on_log5(self): test_washer = TestWasher( test_output_json_file="test_output_failure_panic.json", - flakes_file_path="tasks/unit_tests/testdata/flakes_9.yaml", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_9.yaml"], ) module_path = "tasks/unit_tests/testdata" _, marked_flaky_tests = test_washer.parse_test_results(module_path) @@ 
-171,6 +171,23 @@ def test_flaky_on_log5(self): {'github.com/DataDog/datadog-agent/pkg/serverless/trace': {'TestLoadConfigShouldBeFast'}}, ) + def test_flaky_merge(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_no_marker.json", + flakes_file_paths=["tasks/unit_tests/testdata/flakes_a.yaml", "tasks/unit_tests/testdata/flakes_b.yaml"], + ) + module_path = "tasks/unit_tests/testdata" + _, marked_flaky_tests = test_washer.parse_test_results(module_path) + self.assertEqual( + marked_flaky_tests, + { + 'github.com/DataDog/datadog-agent/pkg/gohai/filesystem': { + 'TestAsJSON', + 'TestFilterDev/ReplaceDevMountLength', + } + }, + ) + class TestMergeKnownFlakes(unittest.TestCase): def test_with_shared_keys(self): From 4d7237317755fc6d2d8a2ec2caf94b25773854db Mon Sep 17 00:00:00 2001 From: pducolin <45568537+pducolin@users.noreply.github.com> Date: Tue, 28 Jan 2025 12:21:11 +0100 Subject: [PATCH 08/97] [task] add windows-dev-env.run (#32827) Co-authored-by: KevinFairise2 --- tasks/windows_dev_env.py | 177 +++++++++++++++++++++++++++++++++------ 1 file changed, 152 insertions(+), 25 deletions(-) diff --git a/tasks/windows_dev_env.py b/tasks/windows_dev_env.py index 621df82d9d7c28..8045d425ef40eb 100644 --- a/tasks/windows_dev_env.py +++ b/tasks/windows_dev_env.py @@ -2,16 +2,19 @@ Create a remote Windows development environment and keep it in sync with local changes. 
""" +import json import os import re import shutil import time from datetime import timedelta +from typing import Any from invoke.context import Context from invoke.tasks import task AMI_WINDOWS_DEV_2022 = "ami-09b68440cb06b26d6" +WIN_CONTAINER_NAME = "windows-dev-env" @task( @@ -44,19 +47,42 @@ def stop( _stop_windows_dev_env(ctx, name) -def _start_windows_dev_env(ctx, name: str = "windows-dev-env"): - start_time = time.time() - # lazy load watchdog to avoid import error on the CI - from watchdog.events import FileSystemEvent, FileSystemEventHandler - from watchdog.observers import Observer +@task( + help={ + 'name': 'Override the default name of the development environment (windows-dev-env).', + 'command': 'Command to run on a windows dev container', + }, +) +def run( + ctx: Context, + name: str = "windows-dev-env", + command: str = "", +): + """ + Runs a command on a remote Windows development environment. + """ - class DDAgentEventHandler(FileSystemEventHandler): - def __init__(self, ctx: Context, command: str): - self.ctx = ctx - self.command = command + with ctx.cd('../test-infra-definitions'): + # find connection info for the VM + result = ctx.run(f"inv aws.show-vm --stack-name={name}", hide=True) + if result is None or not result: + raise Exception("Failed to find the Windows development environment.") + host = RemoteHost(result.stdout) + rsync_command = _build_rsync_command(f"Administrator@{host.address}") + print("Syncing changes to the remote Windows development environment...") + ctx.run(rsync_command) - def on_any_event(self, event: FileSystemEvent) -> None: # noqa # called by watchdog callback - _on_changed_path_run_command(self.ctx, event.src_path, self.command) + exit( + _run_on_windows_dev_env( + ctx, + name, + f'. 
./tasks/winbuildscripts/common.ps1; Invoke-BuildScript -InstallDeps \\$false -Command {{{command}}}', + ) + ) + + +def _start_windows_dev_env(ctx, name: str = "windows-dev-env"): + start_time = time.time() # Ensure `test-infra-definitions` is cloned. if not os.path.isdir('../test-infra-definitions'): @@ -74,7 +100,7 @@ def on_any_event(self, event: FileSystemEvent) -> None: # noqa # called by watc host = "" with ctx.cd('../test-infra-definitions'): result = ctx.run( - f"inv aws.create-vm --ami-id={AMI_WINDOWS_DEV_2022} --os-family=windows --architecture=x86_64 --no-install-agent --stack-name={name} --no-interactive" + f"inv aws.create-vm --ami-id={AMI_WINDOWS_DEV_2022} --os-family=windows --architecture=x86_64 --no-install-agent --stack-name={name} --no-interactive --instance-type=t3.xlarge" ) if result is None or not result: raise Exception("Failed to create the Windows development environment.") @@ -86,9 +112,62 @@ def on_any_event(self, event: FileSystemEvent) -> None: # noqa # called by watc raise Exception("Failed to find pulumi output in stdout.") # extract username and address from connection message host = connection_message.split()[0] + print("Disabling Windows Defender and rebooting the Windows dev environment...") + _disable_WD_and_reboot(ctx, host) + _wait_for_windows_dev_env(ctx, host) + print("Host rebooted") + # check if Windows dev container is already running + should_start_container = True + result = ctx.run(f"ssh {host} 'docker ps -q --filter name=windows-dev-env'", warn=True, hide=True) + if result is not None and result.exited == 0 and len(result.stdout) > 0: + print("🐳 Windows dev env already running") + should_start_container = False + # start the Windows dev container, if not already running + if should_start_container: + print("🐳 Starting Windows dev container") + ctx.run( + f"ssh {host} 'docker run -m 16384 -v C:\\mnt:c:\\mnt:rw -w C:\\mnt\\datadog-agent -t -d --name {WIN_CONTAINER_NAME} datadog/agent-buildimages-windows_x64:ltsc2022 tail 
-f /dev/null'" + ) + # Pull the latest version of datadog-agent to make initial sync faster + print("Pulling the latest version of datadog-agent to make initial sync faster...") + _run_on_windows_dev_env(ctx, name, "git pull") + print("Pulling the latest version of datadog-agent done") # sync local changes to the remote Windows development environment - # -aqzrcIR + rsync_command = _build_rsync_command(host) + print("Syncing changes to the remote Windows development environment...") + ctx.run(rsync_command) + print("Syncing changes to the remote Windows development done") + print("Installing all dependencies in the Windows dev container... this may take a long time") + _run_on_windows_dev_env( + ctx, + name, + ". ./tasks/winbuildscripts/common.ps1; Invoke-BuildScript -InstallTestingDeps \\$true -InstallDeps \\$true -Command {inv -e tidy}", + ) + # print the time taken to start the dev env + elapsed_time = time.time() - start_time + print("♻️ Windows dev env started in", timedelta(seconds=elapsed_time)) + _run_command_on_local_changes(ctx, rsync_command) + print("♻️ Windows dev env sync stopped") + print("Start it again with `inv windows_dev_env.start`") + print("Destroy the Windows dev env with `inv windows-dev-env.stop`") + + +def _disable_WD_and_reboot(ctx, host): + ctx.run(f"ssh {host} 'Remove-WindowsFeature Windows-Defender'") + ctx.run(f"ssh {host} 'Restart-Computer -Force'") + + +def _wait_for_windows_dev_env(ctx, host): + while True: + r = ctx.run(f"ssh {host} 'Get-MpComputerStatus | select Antivirus'", hide=True, warn=True) + if "Invalid class" in r.stderr: + break + + time.sleep(5) + + +def _build_rsync_command(host: str) -> str: # -a: archive mode; equals -rlptgoD (no -H) # -z: compress file data during the transfer # -r: recurse into directories @@ -96,19 +175,26 @@ def on_any_event(self, event: FileSystemEvent) -> None: # noqa # called by watc # -I: --ignore-times # -P: same as --partial --progress, show partial progress during transfer # -R: use 
relative path names - rsync_command = f"rsync -azrcIPR --delete --rsync-path='C:\\cygwin\\bin\\rsync.exe' --filter=':- .gitignore' --exclude /.git/ . {host}:/cygdrive/c/mnt/datadog-agent/" - print("Syncing changes to the remote Windows development environment...") - ctx.run(rsync_command) - print("Syncing changes to the remote Windows development done") - # print the time taken to start the dev env - elapsed_time = time.time() - start_time - print("♻️ Windows dev env started in", timedelta(seconds=elapsed_time)) + return f"rsync --chmod=ugo=rwX -azrcIPR --delete --rsync-path='C:\\cygwin\\bin\\rsync.exe' --filter=':- .gitignore' --exclude /.git/ . {host}:/cygdrive/c/mnt/datadog-agent/" + + +def _run_command_on_local_changes(ctx: Context, command: str): + # lazy load watchdog to avoid import error on the CI + from watchdog.events import FileSystemEvent, FileSystemEventHandler + from watchdog.observers import Observer - event_handler = DDAgentEventHandler(ctx=ctx, command=rsync_command) + class DDAgentEventHandler(FileSystemEventHandler): + def __init__(self, ctx: Context, command: str): + self.ctx = ctx + self.command = command + + def on_any_event(self, event: FileSystemEvent) -> None: # noqa # called by watchdog callback + _on_changed_path_run_command(self.ctx, event.src_path, self.command) + + event_handler = DDAgentEventHandler(ctx=ctx, command=command) observer = Observer() observer.schedule(event_handler, ".", recursive=True) observer.start() - try: while True: time.sleep(1) @@ -116,9 +202,6 @@ def on_any_event(self, event: FileSystemEvent) -> None: # noqa # called by watc observer.stop() finally: observer.join() - print("♻️ Windows dev env sync stopped") - print("Start it again with `inv windows_dev_env.start`") - print("Destroy the Windows dev env with `inv windows-dev-env.stop`") # start file watcher and run rsync on changes @@ -141,3 +224,47 @@ def _on_changed_path_run_command(ctx: Context, path: str, command: str): def _stop_windows_dev_env(ctx, name: str 
= "windows-dev-env"): with ctx.cd('../test-infra-definitions'): ctx.run(f"inv aws.destroy-vm --stack-name={name}") + + +class RemoteHost: + def __init__(self, output: str): + remoteHost: Any = json.loads(output) + self.address: str = remoteHost["address"] + self.user: str = remoteHost["user"] + self.password: str | None = "password" in remoteHost and remoteHost["password"] or None + self.port: int | None = "port" in remoteHost and remoteHost["port"] or None + + +def _run_on_windows_dev_env(ctx: Context, name: str = "windows-dev-env", command: str = "") -> int: + with ctx.cd('../test-infra-definitions'): + # find connection info for the VM + result = ctx.run(f"inv aws.show-vm --stack-name={name}", hide=True) + if result is None or not result: + raise Exception("Failed to find the Windows development environment.") + host = RemoteHost(result.stdout) + # run the command on the Windows development environment + docker_command_parts = [ + 'docker', + 'exec', + '-it', + WIN_CONTAINER_NAME, + 'powershell', + f"'{command}'", + ] + joined_docker_command_parts = ' '.join(docker_command_parts) + command_parts = [ + "ssh", + f'{host.user}@{host.address}', + "-p", + f'{host.port}', + "-t", + f'"{joined_docker_command_parts}"', + ] + result = ctx.run( + ' '.join(command_parts), + pty=True, + warn=True, + ) + if result is None or not result: + raise Exception("Failed to run the command on the Windows development environment.") + return result.exited From c59475d210b62bec1705f7cd1106d6c62257454d Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Tue, 28 Jan 2025 12:21:38 +0100 Subject: [PATCH 09/97] feat(renovate): Add a renovate configuration for auto-bump of buildimage (#33299) --- .github/CODEOWNERS | 1 + renovate.json | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 renovate.json diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ea9e9115fb5dc2..688f3fe746ea1d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -37,6 
+37,7 @@ /mkdocs.yml @DataDog/agent-devx-infra /release.json @DataDog/agent-delivery @DataDog/agent-metrics-logs @DataDog/windows-kernel-integrations @DataDog/agent-security +/renovate.json @DataDog/agent-devx-infra /requirements.txt @DataDog/agent-devx-infra /pyproject.toml @DataDog/agent-devx-infra @DataDog/agent-devx-loops /repository.datadog.yml @DataDog/agent-devx-infra diff --git a/renovate.json b/renovate.json new file mode 100644 index 00000000000000..ff1f0dc60d2fa0 --- /dev/null +++ b/renovate.json @@ -0,0 +1,27 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "customManagers" : [ + { + "customType": "regex", + "fileMatch": [".gitlab-ci.yml"], + "matchStrings": [ + " DATADOG_AGENT_[^:]*: (?v.*)", + " CI_IMAGE_[^:]*: (?v.*)" + ], + "depNameTemplate": "buildimages", + "versioningTemplate": "loose", + "datasourceTemplate": "custom.buildimages" + } + ], + "customDatasources": { + "buildimages": { + "defaultRegistryUrlTemplate": "https://hub.docker.com/v2/namespaces/datadog/repositories/agent-buildimages-deb_x64/tags", + "transformTemplates": [ + "{\"releases\": $map(results, function($v) { {\"version\": $v.name, \"releaseTimestamp\": $v.last_updated } }) }" + ] + } + } + } From 135d9ec10b1c5331908b3ae518b9dc32b8e8203c Mon Sep 17 00:00:00 2001 From: val06 Date: Tue, 28 Jan 2025 13:30:16 +0200 Subject: [PATCH 10/97] [EBPF] added telemetry to the sysprobe remote client (#33308) --- cmd/system-probe/api/client/client.go | 28 ++++++++++++++++ cmd/system-probe/api/client/client_test.go | 37 ++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/cmd/system-probe/api/client/client.go b/cmd/system-probe/api/client/client.go index 780dbb972fe0ee..e3618353fbc330 100644 --- a/cmd/system-probe/api/client/client.go +++ b/cmd/system-probe/api/client/client.go @@ -18,14 +18,34 @@ import ( "time" "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" + 
"github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/funcs" ) +const ( + checkLabelName = "check" + telemetrySubsystem = "system_probe__remote_client" +) + var ( // ErrNotImplemented is an error used when system-probe is attempted to be accessed on an unsupported OS ErrNotImplemented = errors.New("system-probe unsupported") ) +var checkTelemetry = struct { + totalRequests telemetry.Counter + failedRequests telemetry.Counter + failedResponses telemetry.Counter + responseErrors telemetry.Counter + malformedResponses telemetry.Counter +}{ + telemetry.NewCounter(telemetrySubsystem, "requests__total", []string{checkLabelName}, "Counter measuring how many system-probe check requests were made"), + telemetry.NewCounter(telemetrySubsystem, "requests__failed", []string{checkLabelName}, "Counter measuring how many system-probe check requests failed to be sent"), + telemetry.NewCounter(telemetrySubsystem, "responses__not_received", []string{checkLabelName}, "Counter measuring how many responses from system-probe check were not read from the socket"), + telemetry.NewCounter(telemetrySubsystem, "responses__errors", []string{checkLabelName}, "Counter measuring how many non_ok status code received from system-probe checks"), + telemetry.NewCounter(telemetrySubsystem, "responses__malformed", []string{checkLabelName}, "Counter measuring how many malformed responses were received from system-probe checks"), +} + // Get returns a http client configured to talk to the system-probe var Get = funcs.MemoizeArgNoError[string, *http.Client](get) @@ -45,27 +65,35 @@ func get(socketPath string) *http.Client { // GetCheck returns data unmarshalled from JSON to T, from the specified module at the //check endpoint. 
func GetCheck[T any](client *http.Client, module types.ModuleName) (T, error) { + checkTelemetry.totalRequests.IncWithTags(map[string]string{checkLabelName: string(module)}) var data T req, err := http.NewRequest("GET", ModuleURL(module, "/check"), nil) if err != nil { + //we don't have a counter for this case, because this function can't really fail, since ModuleURL function constructs a safe URL return data, err } resp, err := client.Do(req) if err != nil { + checkTelemetry.failedRequests.IncWithTags(map[string]string{checkLabelName: string(module)}) return data, err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { + checkTelemetry.failedResponses.IncWithTags(map[string]string{checkLabelName: string(module)}) return data, err } if resp.StatusCode != http.StatusOK { + checkTelemetry.responseErrors.IncWithTags(map[string]string{checkLabelName: string(module)}) return data, fmt.Errorf("non-ok status code: url %s, status_code: %d, response: `%s`", req.URL, resp.StatusCode, string(body)) } err = json.Unmarshal(body, &data) + if err != nil { + checkTelemetry.malformedResponses.IncWithTags(map[string]string{checkLabelName: string(module)}) + } return data, err } diff --git a/cmd/system-probe/api/client/client_test.go b/cmd/system-probe/api/client/client_test.go index d67af13e795aff..1fa4fbf3cab832 100644 --- a/cmd/system-probe/api/client/client_test.go +++ b/cmd/system-probe/api/client/client_test.go @@ -27,6 +27,22 @@ func TestConstructURL(t *testing.T) { assert.Equal(t, "http://sysprobe/zzzz/asdf", u) } +type expectedTelemetryValues struct { + totalRequests float64 + failedRequests float64 + failedResponses float64 + responseErrors float64 + malformedResponses float64 +} + +func validateTelemetry(t *testing.T, module string, expected expectedTelemetryValues) { + assert.Equal(t, expected.totalRequests, checkTelemetry.totalRequests.WithValues(module).Get(), "mismatched totalRequests counter value") + assert.Equal(t, expected.failedRequests, 
checkTelemetry.failedRequests.WithValues(module).Get(), "mismatched failedRequest counter value") + assert.Equal(t, expected.failedResponses, checkTelemetry.failedResponses.WithValues(module).Get(), "mismatched failedResponses counter value") + assert.Equal(t, expected.responseErrors, checkTelemetry.responseErrors.WithValues(module).Get(), "mismatched responseErrors counter value") + assert.Equal(t, expected.malformedResponses, checkTelemetry.malformedResponses.WithValues(module).Get(), "mismatched malformedResponses counter value") +} + func TestGetCheck(t *testing.T) { type testData struct { Str string @@ -36,6 +52,9 @@ func TestGetCheck(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/test/check" { _, _ = w.Write([]byte(`{"Str": "asdf", "Num": 42}`)) + } else if r.URL.Path == "/malformed/check" { + //this should fail in json.Unmarshal + _, _ = w.Write([]byte("1")) } else { w.WriteHeader(http.StatusNotFound) } @@ -46,8 +65,26 @@ func TestGetCheck(t *testing.T) { return net.Dial("tcp", server.Listener.Addr().String()) }}} + //test happy flow resp, err := GetCheck[testData](client, "test") require.NoError(t, err) assert.Equal(t, "asdf", resp.Str) assert.Equal(t, 42, resp.Num) + validateTelemetry(t, "test", expectedTelemetryValues{1, 0, 0, 0, 0}) + + //test responseError counter + resp, err = GetCheck[testData](client, "foo") + require.Error(t, err) + validateTelemetry(t, "foo", expectedTelemetryValues{1, 0, 0, 1, 0}) + + //test malformedResponses counter + resp, err = GetCheck[testData](client, "malformed") + require.Error(t, err) + validateTelemetry(t, "malformed", expectedTelemetryValues{1, 0, 0, 0, 1}) + + //test failedRequests counter + server.Close() + resp, err = GetCheck[testData](client, "test") + require.Error(t, err) + validateTelemetry(t, "test", expectedTelemetryValues{2, 1, 0, 0, 0}) } From 38e326b1cb1cc0910884999a29d082c01696b2ac Mon Sep 17 00:00:00 2001 From: Paul Cacheux 
Date: Tue, 28 Jan 2025 12:42:56 +0100 Subject: [PATCH 11/97] sbom: allow collector to scan library packages and any relationships (#33409) --- go.mod | 2 +- pkg/util/trivy/trivy.go | 39 ++++++++++++--------------------------- 2 files changed, 13 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index fa6c92295b9ae4..774f2c188374cf 100644 --- a/go.mod +++ b/go.mod @@ -190,7 +190,7 @@ require ( github.com/acobaugh/osrelease v0.1.0 github.com/alecthomas/participle v0.7.1 // indirect github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 - github.com/aquasecurity/trivy-db v0.0.0-20240910133327-7e0f4d2ed4c1 + github.com/aquasecurity/trivy-db v0.0.0-20240910133327-7e0f4d2ed4c1 // indirect github.com/avast/retry-go/v4 v4.6.0 github.com/aws/aws-lambda-go v1.37.0 github.com/aws/aws-sdk-go v1.55.6 // indirect diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index 2a4abe21a73436..215673b2f2706d 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -23,7 +23,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/option" - "github.com/aquasecurity/trivy-db/pkg/db" "github.com/aquasecurity/trivy/pkg/fanal/analyzer" "github.com/aquasecurity/trivy/pkg/fanal/applier" "github.com/aquasecurity/trivy/pkg/fanal/artifact" @@ -33,10 +32,7 @@ import ( "github.com/aquasecurity/trivy/pkg/fanal/walker" "github.com/aquasecurity/trivy/pkg/sbom/cyclonedx" "github.com/aquasecurity/trivy/pkg/scanner" - "github.com/aquasecurity/trivy/pkg/scanner/langpkg" - "github.com/aquasecurity/trivy/pkg/scanner/ospkg" "github.com/aquasecurity/trivy/pkg/types" - "github.com/aquasecurity/trivy/pkg/vulnerability" // This is required to load sqlite based RPM databases _ "modernc.org/sqlite" @@ -64,9 +60,6 @@ type Collector struct { config collectorConfig cacheInitialized sync.Once persistentCache CacheWithCleaner - osScanner ospkg.Scanner - langScanner langpkg.Scanner - 
vulnClient vulnerability.Client marshaler cyclonedx.Marshaler wmeta option.Option[workloadmeta.Component] } @@ -158,11 +151,8 @@ func NewCollector(cfg config.Component, wmeta option.Option[workloadmeta.Compone maxCacheSize: cfg.GetInt("sbom.cache.max_disk_size"), overlayFSSupport: cfg.GetBool("sbom.container_image.overlayfs_direct_scan"), }, - osScanner: ospkg.NewScanner(), - langScanner: langpkg.NewScanner(), - vulnClient: vulnerability.NewClient(db.Config{}), - marshaler: cyclonedx.NewMarshaler(""), - wmeta: wmeta, + marshaler: cyclonedx.NewMarshaler(""), + wmeta: wmeta, }, nil } @@ -294,21 +284,18 @@ func (d *driver) Scan(_ context.Context, target, artifactKey string, blobKeys [] return nil, ftypes.OS{}, xerrors.Errorf("failed to apply layers: %w", err) } - scanTarget := types.ScanTarget{ - Name: target, - OS: detail.OS, - Repository: detail.Repository, - Packages: detail.Packages, - } - result := types.Result{ Target: fmt.Sprintf("%s (%s %s)", target, detail.OS.Family, detail.OS.Name), Class: types.ClassOSPkg, - Type: scanTarget.OS.Family, + Type: detail.OS.Family, } - sort.Sort(scanTarget.Packages) - result.Packages = scanTarget.Packages + sort.Sort(detail.Packages) + result.Packages = detail.Packages + for _, app := range detail.Applications { + sort.Sort(app.Packages) + result.Packages = append(result.Packages, app.Packages...) 
+ } return []types.Result{result}, detail.OS, nil } @@ -328,11 +315,9 @@ func (c *Collector) scan(ctx context.Context, artifact artifact.Artifact, applie trivyReport, err := s.ScanArtifact(ctx, types.ScanOptions{ ScanRemovedPackages: false, - PkgTypes: []types.PkgType{types.PkgTypeOS}, - PkgRelationships: []ftypes.Relationship{ - ftypes.RelationshipUnknown, - }, - Scanners: types.Scanners{types.SBOMScanner}, + PkgTypes: []types.PkgType{types.PkgTypeOS, types.PkgTypeLibrary}, + PkgRelationships: ftypes.Relationships, + Scanners: types.Scanners{types.SBOMScanner}, }) if err != nil { return nil, err From 7c637fe800303e1ce6cb0267c4b626f54ee6c733 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 28 Jan 2025 12:58:18 +0100 Subject: [PATCH 12/97] [CWS] pass the missing logs compression component to the direct sender (#33462) --- cmd/system-probe/api/module/loader.go | 10 ++++++---- cmd/system-probe/api/server.go | 5 +++-- cmd/system-probe/subcommands/run/command.go | 16 ++++++++++------ .../servicediscovery/module/impl_linux_test.go | 2 +- 4 files changed, 20 insertions(+), 13 deletions(-) diff --git a/cmd/system-probe/api/module/loader.go b/cmd/system-probe/api/module/loader.go index 5e028b0d538f73..4fa5b4777ee87a 100644 --- a/cmd/system-probe/api/module/loader.go +++ b/cmd/system-probe/api/module/loader.go @@ -19,6 +19,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -64,7 +65,7 @@ func withModule(name sysconfigtypes.ModuleName, fn func()) { // * Initialization using the provided Factory; // * Registering the HTTP endpoints of each module; // * Register the gRPC server; -func Register(cfg *sysconfigtypes.Config, httpMux *mux.Router, factories []Factory, wmeta 
workloadmeta.Component, tagger tagger.Component, telemetry telemetry.Component) error { +func Register(cfg *sysconfigtypes.Config, httpMux *mux.Router, factories []Factory, wmeta workloadmeta.Component, tagger tagger.Component, telemetry telemetry.Component, compression logscompression.Component) error { var enabledModulesFactories []Factory for _, factory := range factories { if !cfg.ModuleIsEnabled(factory.Name) { @@ -83,9 +84,10 @@ func Register(cfg *sysconfigtypes.Config, httpMux *mux.Router, factories []Facto var module Module withModule(factory.Name, func() { deps := FactoryDependencies{ - WMeta: wmeta, - Tagger: tagger, - Telemetry: telemetry, + WMeta: wmeta, + Tagger: tagger, + Telemetry: telemetry, + Compression: compression, } module, err = factory.Fn(cfg, deps) }) diff --git a/cmd/system-probe/api/server.go b/cmd/system-probe/api/server.go index f0fbe81919f307..ec5e5ec870431e 100644 --- a/cmd/system-probe/api/server.go +++ b/cmd/system-probe/api/server.go @@ -25,12 +25,13 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/log" ) // StartServer starts the HTTP and gRPC servers for the system-probe, which registers endpoints from all enabled modules. 
-func StartServer(cfg *sysconfigtypes.Config, telemetry telemetry.Component, wmeta workloadmeta.Component, tagger tagger.Component, settings settings.Component) error { +func StartServer(cfg *sysconfigtypes.Config, telemetry telemetry.Component, wmeta workloadmeta.Component, tagger tagger.Component, settings settings.Component, compression logscompression.Component) error { conn, err := server.NewListener(cfg.SocketAddress) if err != nil { return err @@ -38,7 +39,7 @@ func StartServer(cfg *sysconfigtypes.Config, telemetry telemetry.Component, wmet mux := gorilla.NewRouter() - err = module.Register(cfg, mux, modules.All, wmeta, tagger, telemetry) + err = module.Register(cfg, mux, modules.All, wmeta, tagger, telemetry, compression) if err != nil { return fmt.Errorf("failed to create system probe: %s", err) } diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index f5d2990b5b0abb..5b43235f743fcf 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -53,6 +53,8 @@ import ( compstatsd "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/rcclientimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" @@ -143,6 +145,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { } }), settingsimpl.Module(), + logscompressionfx.Module(), ) }, } @@ -152,7 +155,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { } // run starts the main loop. 
-func run(log log.Component, _ config.Component, statsd compstatsd.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, rcclient rcclient.Component, wmeta workloadmeta.Component, tagger tagger.Component, _ pid.Component, _ healthprobe.Component, _ autoexit.Component, settings settings.Component) error { +func run(log log.Component, _ config.Component, statsd compstatsd.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, rcclient rcclient.Component, wmeta workloadmeta.Component, tagger tagger.Component, _ pid.Component, _ healthprobe.Component, _ autoexit.Component, settings settings.Component, compression logscompression.Component) error { defer func() { stopSystemProbe() }() @@ -194,7 +197,7 @@ func run(log log.Component, _ config.Component, statsd compstatsd.Component, tel } }() - if err := startSystemProbe(log, statsd, telemetry, sysprobeconfig, rcclient, wmeta, tagger, settings); err != nil { + if err := startSystemProbe(log, statsd, telemetry, sysprobeconfig, rcclient, wmeta, tagger, settings, compression); err != nil { if errors.Is(err, ErrNotEnabled) { // A sleep is necessary to ensure that supervisor registers this process as "STARTED" // If the exit is "too quick", we enter a BACKOFF->FATAL loop even though this is an expected exit @@ -238,9 +241,9 @@ func StartSystemProbeWithDefaults(ctxChan <-chan context.Context) (<-chan error, func runSystemProbe(ctxChan <-chan context.Context, errChan chan error) error { return fxutil.OneShot( - func(log log.Component, _ config.Component, statsd compstatsd.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, rcclient rcclient.Component, wmeta workloadmeta.Component, tagger tagger.Component, _ healthprobe.Component, settings settings.Component) error { + func(log log.Component, _ config.Component, statsd compstatsd.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, rcclient rcclient.Component, 
wmeta workloadmeta.Component, tagger tagger.Component, _ healthprobe.Component, settings settings.Component, compression logscompression.Component) error { defer StopSystemProbeWithDefaults() - err := startSystemProbe(log, statsd, telemetry, sysprobeconfig, rcclient, wmeta, tagger, settings) + err := startSystemProbe(log, statsd, telemetry, sysprobeconfig, rcclient, wmeta, tagger, settings, compression) if err != nil { return err } @@ -311,6 +314,7 @@ func runSystemProbe(ctxChan <-chan context.Context, errChan chan error) error { } }), settingsimpl.Module(), + logscompressionfx.Module(), ) } @@ -320,7 +324,7 @@ func StopSystemProbeWithDefaults() { } // startSystemProbe Initializes the system-probe process -func startSystemProbe(log log.Component, statsd compstatsd.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, _ rcclient.Component, wmeta workloadmeta.Component, tagger tagger.Component, settings settings.Component) error { +func startSystemProbe(log log.Component, statsd compstatsd.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, _ rcclient.Component, wmeta workloadmeta.Component, tagger tagger.Component, settings settings.Component, compression logscompression.Component) error { var err error cfg := sysprobeconfig.SysProbeObject() @@ -380,7 +384,7 @@ func startSystemProbe(log log.Component, statsd compstatsd.Component, telemetry }() } - if err = api.StartServer(cfg, telemetry, wmeta, tagger, settings); err != nil { + if err = api.StartServer(cfg, telemetry, wmeta, tagger, settings, compression); err != nil { return log.Criticalf("error while starting api server, exiting: %v", err) } return nil diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 1f388f538cd58d..9cbfaae0fc9a95 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ 
b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -92,7 +92,7 @@ func setupDiscoveryModule(t *testing.T) (string, *proccontainersmocks.MockContai return false }, } - err := module.Register(cfg, mux, []module.Factory{m}, wmeta, tagger, nil) + err := module.Register(cfg, mux, []module.Factory{m}, wmeta, tagger, nil, nil) require.NoError(t, err) srv := httptest.NewServer(mux) From 738a3e184847f0b80333e33d0318324c7d75d145 Mon Sep 17 00:00:00 2001 From: Baptiste Foy Date: Tue, 28 Jan 2025 13:29:12 +0100 Subject: [PATCH 13/97] fix(installer): Properly log on systemd unexpected order (#33457) --- test/new-e2e/tests/installer/host/systemd.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/new-e2e/tests/installer/host/systemd.go b/test/new-e2e/tests/installer/host/systemd.go index bc38e1d62f61c7..9145dce11c5da0 100644 --- a/test/new-e2e/tests/installer/host/systemd.go +++ b/test/new-e2e/tests/installer/host/systemd.go @@ -123,8 +123,7 @@ func (h *Host) AssertSystemdEvents(since JournaldTimestamp, events SystemdEventS } for unit := range units { - h.t.Logf("--- Logs for unit %s:", unit) - h.remote.MustExecute(fmt.Sprintf("sudo journalctl -xeu %s", unit)) + h.t.Logf("--- Logs for unit %s:\n%s", unit, h.remote.MustExecute(fmt.Sprintf("sudo journalctl -xeu %s", unit))) } } } From 5c6630ca861b7c2fd205bfa83e98e1cfd2a1fcf6 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 28 Jan 2025 13:41:18 +0100 Subject: [PATCH 14/97] [CWS] preallocate slice in `UnmarshalStringArray` (#33400) --- pkg/security/secl/model/utils.go | 18 +++++++++++++++++- pkg/security/secl/model/utils_test.go | 16 ++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/pkg/security/secl/model/utils.go b/pkg/security/secl/model/utils.go index 28ef16a374ab5d..be686dc52ceed7 100644 --- a/pkg/security/secl/model/utils.go +++ b/pkg/security/secl/model/utils.go @@ -22,9 +22,25 @@ func SliceToArray(src []byte, dst []byte) { // 
UnmarshalStringArray extract array of string for array of byte func UnmarshalStringArray(data []byte) ([]string, error) { - var result []string length := uint32(len(data)) + prealloc := 0 + for i := uint32(0); i < length; { + if i+4 >= length { + break + } + // size of arg + n := binary.NativeEndian.Uint32(data[i : i+4]) + if n == 0 { + break + } + i += 4 + i += n + prealloc++ + } + + result := make([]string, 0, prealloc) + for i := uint32(0); i < length; { if i+4 >= length { return result, ErrStringArrayOverflow diff --git a/pkg/security/secl/model/utils_test.go b/pkg/security/secl/model/utils_test.go index 1760568801fa79..ea3fce6a60edf9 100644 --- a/pkg/security/secl/model/utils_test.go +++ b/pkg/security/secl/model/utils_test.go @@ -7,6 +7,7 @@ package model import ( + "encoding/binary" "runtime" "testing" @@ -31,3 +32,18 @@ func BenchmarkNullTerminatedString(b *testing.B) { } runtime.KeepAlive(s) } + +func BenchmarkUnmarshalStringArray(b *testing.B) { + var data []byte + for range 4096 { + data = binary.NativeEndian.AppendUint32(data, 4) + data = append(data, []byte("test")...) 
+ } + + for i := 0; i < b.N; i++ { + _, err := UnmarshalStringArray(data) + if err != nil { + b.Fatal(err) + } + } +} From ba7307962eb2ef056f712b9f39c2e48078e97b0f Mon Sep 17 00:00:00 2001 From: Alexandre Yang Date: Tue, 28 Jan 2025 13:54:05 +0100 Subject: [PATCH 15/97] [HA Agent] Rename to config_id and active_agent (#33234) --- comp/haagent/def/component.go | 4 +- comp/haagent/helpers/helpers.go | 11 ++---- comp/haagent/helpers/helpers_test.go | 12 ++---- comp/haagent/impl/config.go | 8 ++-- comp/haagent/impl/haagent.go | 14 +++---- comp/haagent/impl/haagent_test.go | 38 +++++++++---------- comp/haagent/impl/rcpayload.go | 4 +- comp/haagent/mock/mock.go | 20 +++++----- comp/metadata/host/hostimpl/hosttags/tags.go | 5 --- .../host/hostimpl/hosttags/tags_test.go | 15 -------- .../snmp/internal/devicecheck/devicecheck.go | 7 +--- .../internal/devicecheck/devicecheck_test.go | 5 +-- 12 files changed, 53 insertions(+), 90 deletions(-) diff --git a/comp/haagent/def/component.go b/comp/haagent/def/component.go index bc5038265b76fa..31f2aa4f6078dc 100644 --- a/comp/haagent/def/component.go +++ b/comp/haagent/def/component.go @@ -13,8 +13,8 @@ type Component interface { // Enabled returns true if ha_agent.enabled is set to true Enabled() bool - // GetGroup returns the value of ha_agent.group - GetGroup() string + // GetConfigID returns the value of config_id + GetConfigID() string // GetState returns current HA agent state GetState() State diff --git a/comp/haagent/helpers/helpers.go b/comp/haagent/helpers/helpers.go index 4b5e755e04936c..c630a85d85e578 100644 --- a/comp/haagent/helpers/helpers.go +++ b/comp/haagent/helpers/helpers.go @@ -15,12 +15,7 @@ func IsEnabled(agentConfig model.Reader) bool { return agentConfig.GetBool("ha_agent.enabled") } -// GetGroup returns HA Agent group -func GetGroup(agentConfig model.Reader) string { - return agentConfig.GetString("ha_agent.group") -} - -// GetHaAgentTags returns HA Agent related tags -func GetHaAgentTags(agentConfig 
model.Reader) []string { - return []string{"agent_group:" + GetGroup(agentConfig)} +// GetConfigID returns config_id +func GetConfigID(agentConfig model.Reader) string { + return agentConfig.GetString("config_id") } diff --git a/comp/haagent/helpers/helpers_test.go b/comp/haagent/helpers/helpers_test.go index 987ab6a5cccf4f..194d8d77068168 100644 --- a/comp/haagent/helpers/helpers_test.go +++ b/comp/haagent/helpers/helpers_test.go @@ -20,14 +20,8 @@ func TestIsEnabled(t *testing.T) { assert.True(t, IsEnabled(cfg)) } -func TestGetGroup(t *testing.T) { +func TestGetConfigID(t *testing.T) { cfg := config.NewMock(t) - cfg.SetWithoutSource("ha_agent.group", "my-group") - assert.Equal(t, "my-group", GetGroup(cfg)) -} - -func TestGetHaAgentTags(t *testing.T) { - cfg := config.NewMock(t) - cfg.SetWithoutSource("ha_agent.group", "my-group") - assert.Equal(t, []string{"agent_group:my-group"}, GetHaAgentTags(cfg)) + cfg.SetWithoutSource("config_id", "my-config-id") + assert.Equal(t, "my-config-id", GetConfigID(cfg)) } diff --git a/comp/haagent/impl/config.go b/comp/haagent/impl/config.go index ea9f54d9f16ea3..51ad52ecd7deee 100644 --- a/comp/haagent/impl/config.go +++ b/comp/haagent/impl/config.go @@ -25,13 +25,13 @@ var validHaIntegrations = map[string]bool{ } type haAgentConfigs struct { - enabled bool - group string + enabled bool + configID string } func newHaAgentConfigs(agentConfig config.Component) *haAgentConfigs { return &haAgentConfigs{ - enabled: helpers.IsEnabled(agentConfig), - group: helpers.GetGroup(agentConfig), + enabled: helpers.IsEnabled(agentConfig), + configID: helpers.GetConfigID(agentConfig), } } diff --git a/comp/haagent/impl/haagent.go b/comp/haagent/impl/haagent.go index a2c29b8ce17613..1fdd468a09132d 100644 --- a/comp/haagent/impl/haagent.go +++ b/comp/haagent/impl/haagent.go @@ -34,8 +34,8 @@ func (h *haAgentImpl) Enabled() bool { return h.haAgentConfigs.enabled } -func (h *haAgentImpl) GetGroup() string { - return h.haAgentConfigs.group +func (h 
*haAgentImpl) GetConfigID() string { + return h.haAgentConfigs.configID } func (h *haAgentImpl) GetState() haagent.State { @@ -103,17 +103,17 @@ func (h *haAgentImpl) onHaAgentUpdate(updates map[string]state.RawConfig, applyS }) continue } - if haAgentMsg.Group != h.GetGroup() { - h.log.Warnf("Skipping invalid HA_AGENT update %s: expected group %s, got %s", - configPath, h.GetGroup(), haAgentMsg.Group) + if haAgentMsg.ConfigID != h.GetConfigID() { + h.log.Warnf("Skipping invalid HA_AGENT update %s: expected configID %s, got %s", + configPath, h.GetConfigID(), haAgentMsg.ConfigID) applyStateCallback(configPath, state.ApplyStatus{ State: state.ApplyStateError, - Error: "group does not match", + Error: "config_id does not match", }) continue } - h.SetLeader(haAgentMsg.Leader) + h.SetLeader(haAgentMsg.ActiveAgent) h.log.Debugf("Processed config %s: %v", configPath, haAgentMsg) diff --git a/comp/haagent/impl/haagent_test.go b/comp/haagent/impl/haagent_test.go index 8cb66ce2ec0b29..59bb033de12ff6 100644 --- a/comp/haagent/impl/haagent_test.go +++ b/comp/haagent/impl/haagent_test.go @@ -18,8 +18,8 @@ import ( "go.uber.org/fx" ) -var testConfigID = "datadog/2/HA_AGENT/group-62345762794c0c0b/65f17d667fb50f8ae28a3c858bdb1be9ea994f20249c119e007c520ac115c807" -var testGroup = "testGroup01" +var testRCConfigID = "datadog/2/HA_AGENT/config-62345762794c0c0b/65f17d667fb50f8ae28a3c858bdb1be9ea994f20249c119e007c520ac115c807" +var testConfigID = "testConfig01" func Test_Enabled(t *testing.T) { tests := []struct { @@ -50,12 +50,12 @@ func Test_Enabled(t *testing.T) { } } -func Test_GetGroup(t *testing.T) { +func Test_GetConfigID(t *testing.T) { agentConfigs := map[string]interface{}{ - "ha_agent.group": "my-group-01", + "config_id": "my-configID-01", } haAgent := newTestHaAgentComponent(t, agentConfigs).Comp - assert.Equal(t, "my-group-01", haAgent.GetGroup()) + assert.Equal(t, "my-configID-01", haAgent.GetConfigID()) } func Test_GetState(t *testing.T) { @@ -119,9 +119,9 @@ func 
Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { name: "successful update with leader matching current agent", initialState: haagent.Unknown, updates: map[string]state.RawConfig{ - testConfigID: {Config: []byte(`{"group":"testGroup01","leader":"my-agent-hostname"}`)}, + testRCConfigID: {Config: []byte(`{"config_id":"testConfig01","active_agent":"my-agent-hostname"}`)}, }, - expectedApplyID: testConfigID, + expectedApplyID: testRCConfigID, expectedApplyStatus: state.ApplyStatus{ State: state.ApplyStateAcknowledged, }, @@ -131,9 +131,9 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { name: "successful update with leader NOT matching current agent", initialState: haagent.Unknown, updates: map[string]state.RawConfig{ - testConfigID: {Config: []byte(`{"group":"testGroup01","leader":"another-agent-hostname"}`)}, + testRCConfigID: {Config: []byte(`{"config_id":"testConfig01","active_agent":"another-agent-hostname"}`)}, }, - expectedApplyID: testConfigID, + expectedApplyID: testRCConfigID, expectedApplyStatus: state.ApplyStatus{ State: state.ApplyStateAcknowledged, }, @@ -143,9 +143,9 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { name: "invalid payload", initialState: haagent.Unknown, updates: map[string]state.RawConfig{ - testConfigID: {Config: []byte(`invalid-json`)}, + testRCConfigID: {Config: []byte(`invalid-json`)}, }, - expectedApplyID: testConfigID, + expectedApplyID: testRCConfigID, expectedApplyStatus: state.ApplyStatus{ State: state.ApplyStateError, Error: "error unmarshalling payload", @@ -153,15 +153,15 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { expectedAgentState: haagent.Unknown, }, { - name: "invalid group", + name: "invalid configID", initialState: haagent.Unknown, updates: map[string]state.RawConfig{ - testConfigID: {Config: []byte(`{"group":"invalidGroup","leader":"another-agent-hostname"}`)}, + testRCConfigID: {Config: []byte(`{"config_id":"invalidConfig","active_agent":"another-agent-hostname"}`)}, }, - expectedApplyID: 
testConfigID, + expectedApplyID: testRCConfigID, expectedApplyStatus: state.ApplyStatus{ State: state.ApplyStateError, - Error: "group does not match", + Error: "config_id does not match", }, expectedAgentState: haagent.Unknown, }, @@ -179,7 +179,7 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { agentConfigs := map[string]interface{}{ "hostname": "my-agent-hostname", "ha_agent.enabled": true, - "ha_agent.group": testGroup, + "config_id": testConfigID, } agentConfigComponent := fxutil.Test[config.Component](t, fx.Options( config.MockModule(), @@ -221,7 +221,7 @@ func Test_haAgentImpl_ShouldRunIntegration(t *testing.T) { agentConfigs: map[string]interface{}{ "hostname": testAgentHostname, "ha_agent.enabled": true, - "ha_agent.group": testGroup, + "config_id": testConfigID, }, leader: testAgentHostname, expectShouldRunIntegration: map[string]bool{ @@ -240,7 +240,7 @@ func Test_haAgentImpl_ShouldRunIntegration(t *testing.T) { agentConfigs: map[string]interface{}{ "hostname": testAgentHostname, "ha_agent.enabled": true, - "ha_agent.group": testGroup, + "config_id": testConfigID, }, leader: "another-agent-is-active", expectShouldRunIntegration: map[string]bool{ @@ -258,7 +258,7 @@ func Test_haAgentImpl_ShouldRunIntegration(t *testing.T) { agentConfigs: map[string]interface{}{ "hostname": testAgentHostname, "ha_agent.enabled": false, - "ha_agent.group": testGroup, + "config_id": testConfigID, }, leader: testAgentHostname, expectShouldRunIntegration: map[string]bool{ diff --git a/comp/haagent/impl/rcpayload.go b/comp/haagent/impl/rcpayload.go index 10784dc801d35e..2d5ac33bd1e175 100644 --- a/comp/haagent/impl/rcpayload.go +++ b/comp/haagent/impl/rcpayload.go @@ -6,6 +6,6 @@ package haagentimpl type haAgentConfig struct { - Group string `json:"group"` - Leader string `json:"leader"` + ConfigID string `json:"config_id"` + ActiveAgent string `json:"active_agent"` } diff --git a/comp/haagent/mock/mock.go b/comp/haagent/mock/mock.go index f4d713d93f2715..10aea37f7786bc 
100644 --- a/comp/haagent/mock/mock.go +++ b/comp/haagent/mock/mock.go @@ -19,13 +19,13 @@ import ( type mockHaAgent struct { Logger log.Component - group string - enabled bool - state haagent.State + configID string + enabled bool + state haagent.State } -func (m *mockHaAgent) GetGroup() string { - return m.group +func (m *mockHaAgent) GetConfigID() string { + return m.configID } func (m *mockHaAgent) Enabled() bool { @@ -37,8 +37,8 @@ func (m *mockHaAgent) SetLeader(_ string) { func (m *mockHaAgent) GetState() haagent.State { return haagent.Standby } -func (m *mockHaAgent) SetGroup(group string) { - m.group = group +func (m *mockHaAgent) SetConfigID(configID string) { + m.configID = configID } func (m *mockHaAgent) SetEnabled(enabled bool) { @@ -56,7 +56,7 @@ func (m *mockHaAgent) ShouldRunIntegration(_ string) bool { type Component interface { haagent.Component - SetGroup(string) + SetConfigID(string) SetEnabled(bool) SetState(haagent.State) } @@ -64,8 +64,8 @@ type Component interface { // NewMockHaAgent returns a new Mock func NewMockHaAgent() haagent.Component { return &mockHaAgent{ - enabled: false, - group: "group01", + enabled: false, + configID: "config01", } } diff --git a/comp/metadata/host/hostimpl/hosttags/tags.go b/comp/metadata/host/hostimpl/hosttags/tags.go index 64d06bfd835553..97a8f7ac76359a 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags.go +++ b/comp/metadata/host/hostimpl/hosttags/tags.go @@ -12,7 +12,6 @@ import ( "strings" "time" - haagenthelpers "github.com/DataDog/datadog-agent/comp/haagent/helpers" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" @@ -134,10 +133,6 @@ func Get(ctx context.Context, cached bool, conf model.Reader) *Tags { hostTags = appendToHostTags(hostTags, clusterNameTags) } - if haagenthelpers.IsEnabled(conf) { - hostTags = appendToHostTags(hostTags, haagenthelpers.GetHaAgentTags(conf)) - } - 
gceTags := []string{} providers := getProvidersDefinitionsFunc(conf) for { diff --git a/comp/metadata/host/hostimpl/hosttags/tags_test.go b/comp/metadata/host/hostimpl/hosttags/tags_test.go index 274250a432e460..64460410ac1437 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags_test.go +++ b/comp/metadata/host/hostimpl/hosttags/tags_test.go @@ -137,18 +137,3 @@ func TestHostTagsCache(t *testing.T) { assert.Equal(t, []string{"foo1:value1"}, hostTags.System) assert.Equal(t, 2, nbCall) } - -func TestHaAgentTags(t *testing.T) { - mockConfig, ctx := setupTest(t) - - hostTags := Get(ctx, false, mockConfig) - assert.NotNil(t, hostTags.System) - assert.Equal(t, []string{}, hostTags.System) - - mockConfig.SetWithoutSource("ha_agent.enabled", true) - mockConfig.SetWithoutSource("ha_agent.group", "my-group") - - hostTags = Get(ctx, false, mockConfig) - assert.NotNil(t, hostTags.System) - assert.Equal(t, []string{"agent_group:my-group"}, hostTags.System) -} diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go index d23d5642a24702..6f57817fddec11 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go @@ -18,7 +18,6 @@ import ( "go.uber.org/atomic" "github.com/DataDog/datadog-agent/comp/core/config" - haagenthelpers "github.com/DataDog/datadog-agent/comp/haagent/helpers" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -253,11 +252,7 @@ func (d *DeviceCheck) setDeviceHostExternalTags() { } func (d *DeviceCheck) buildExternalTags() []string { - agentTags := configUtils.GetConfiguredTags(d.agentConfig, false) - if haagenthelpers.IsEnabled(d.agentConfig) { - agentTags = append(agentTags, haagenthelpers.GetHaAgentTags(d.agentConfig)...) 
- } - return agentTags + return configUtils.GetConfiguredTags(d.agentConfig, false) } func (d *DeviceCheck) getValuesAndTags() (bool, []string, *valuestore.ResultValueStore, error) { diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index 1e7ad7fc7a0011..92a9eda81400f0 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -990,8 +990,7 @@ collect_topology: false assert.Nil(t, err) cfg := agentconfig.NewMock(t) - cfg.SetWithoutSource("ha_agent.enabled", true) - cfg.SetWithoutSource("ha_agent.group", "my-group") + cfg.SetWithoutSource("tags", []string{"tag1:value1"}) deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory, cfg) assert.Nil(t, err) @@ -1000,5 +999,5 @@ collect_topology: false externalTags := deviceCk.buildExternalTags() // THEN - assert.Equal(t, []string{"agent_group:my-group"}, externalTags) + assert.Equal(t, []string{"tag1:value1"}, externalTags) } From 76513db3c63eb24cd8cebb343193eb9a63830ffa Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Tue, 28 Jan 2025 13:58:17 +0100 Subject: [PATCH 16/97] [CWS] use more generic names for iterator functions (#33413) --- .../generators/accessors/accessors.tmpl | 4 +- pkg/security/secl/compiler/eval/context.go | 18 +- pkg/security/secl/model/accessors_unix.go | 516 +++++++++--------- pkg/security/secl/model/accessors_windows.go | 26 +- .../{string_array_iter.go => iterator.go} | 8 +- pkg/security/seclwin/model/accessors_win.go | 26 +- .../{string_array_iter.go => iterator.go} | 8 +- tasks/security_agent.py | 2 +- 8 files changed, 305 insertions(+), 303 deletions(-) rename pkg/security/secl/model/{string_array_iter.go => iterator.go} (71%) rename pkg/security/seclwin/model/{string_array_iter.go => iterator.go} (71%) diff --git a/pkg/security/generators/accessors/accessors.tmpl 
b/pkg/security/generators/accessors/accessors.tmpl index 4a197038a66bd9..040ded33fa8ab4 100644 --- a/pkg/security/generators/accessors/accessors.tmpl +++ b/pkg/security/generators/accessors/accessors.tmpl @@ -134,9 +134,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{if $Field.Handler }} {{$Event = "ev"}} {{end}} - {{$AncestorFunc := "newAncestorsIterator"}} + {{$AncestorFunc := "newIterator"}} {{if $Field.GetArrayPrefix}} - {{$AncestorFunc = "newAncestorsIteratorArray"}} + {{$AncestorFunc = "newIteratorArray"}} {{end}} results := {{$AncestorFunc}}(iterator, "{{$Field.Iterator.Name}}", ctx, {{$Event}}, func(ev *Event, current *{{$Field.Iterator.OrigType}}) {{$Field.GetArrayPrefix}}{{$Field.ReturnType}} { {{range $Check := $Checks}} diff --git a/pkg/security/secl/compiler/eval/context.go b/pkg/security/secl/compiler/eval/context.go index b5cd17a1450ead..d1eb055aef48f8 100644 --- a/pkg/security/secl/compiler/eval/context.go +++ b/pkg/security/secl/compiler/eval/context.go @@ -23,10 +23,10 @@ type Context struct { Event Event // cache available across all the evaluations - StringCache map[string][]string - IPNetCache map[string][]net.IPNet - IntCache map[string][]int - BoolCache map[string][]bool + StringCache map[Field][]string + IPNetCache map[Field][]net.IPNet + IntCache map[Field][]int + BoolCache map[Field][]bool // iterator register cache. 
used to cache entry within a single rule evaluation RegisterCache map[RegisterID]*RegisterCacheEntry @@ -40,6 +40,8 @@ type Context struct { resolvedFields []string + IteratorCounters map[Field]int + Error error } @@ -81,10 +83,10 @@ func (c *Context) GetResolvedFields() []string { func NewContext(evt Event) *Context { return &Context{ Event: evt, - StringCache: make(map[string][]string), - IPNetCache: make(map[string][]net.IPNet), - IntCache: make(map[string][]int), - BoolCache: make(map[string][]bool), + StringCache: make(map[Field][]string), + IPNetCache: make(map[Field][]net.IPNet), + IntCache: make(map[Field][]int), + BoolCache: make(map[Field][]bool), Registers: make(map[RegisterID]int), RegisterCache: make(map[RegisterID]*RegisterCacheEntry), IteratorCountCache: make(map[string]int), diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go index f880de9310bb2e..1ae4c89db74a49 100644 --- a/pkg/security/secl/model/accessors_unix.go +++ b/pkg/security/secl/model/accessors_unix.go @@ -4847,7 +4847,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IPNetCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) net.IPNet { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) net.IPNet { return current.Destination.IPNet }) ctx.IPNetCache[field] = results @@ -4873,7 +4873,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, ev, func(ev *Event, current *Flow) bool { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, ev, func(ev *Event, current *Flow) bool { return ev.FieldHandlers.ResolveIsIPPublic(ev, ¤t.Destination) }) ctx.BoolCache[field] = 
results @@ -4898,7 +4898,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Destination.Port) }) ctx.IntCache[field] = results @@ -4923,7 +4923,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Egress.DataSize) }) ctx.IntCache[field] = results @@ -4948,7 +4948,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Egress.PacketCount) }) ctx.IntCache[field] = results @@ -4973,7 +4973,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Ingress.DataSize) }) ctx.IntCache[field] = results @@ -4998,7 +4998,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { 
return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Ingress.PacketCount) }) ctx.IntCache[field] = results @@ -5023,7 +5023,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.L3Protocol) }) ctx.IntCache[field] = results @@ -5048,7 +5048,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.L4Protocol) }) ctx.IntCache[field] = results @@ -5083,7 +5083,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IPNetCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) net.IPNet { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) net.IPNet { return current.Source.IPNet }) ctx.IPNetCache[field] = results @@ -5109,7 +5109,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, ev, func(ev *Event, current *Flow) bool { + results := 
newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, ev, func(ev *Event, current *Flow) bool { return ev.FieldHandlers.ResolveIsIPPublic(ev, ¤t.Source) }) ctx.BoolCache[field] = results @@ -5134,7 +5134,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { + results := newIterator(iterator, "NetworkFlowMonitor.Flows", ctx, nil, func(ev *Event, current *Flow) int { return int(current.Source.Port) }) ctx.IntCache[field] = results @@ -5644,7 +5644,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5669,7 +5669,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5694,7 +5694,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5719,7 +5719,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -5744,7 +5744,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -5769,7 +5769,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) 
ctx.StringCache[field] = results @@ -5793,7 +5793,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results @@ -5817,7 +5817,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results @@ -5841,7 +5841,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results @@ -5865,7 +5865,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results @@ -5889,7 +5889,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results @@ -5914,7 +5914,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -5939,7 +5939,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -5964,7 +5964,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { 
return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results @@ -5988,7 +5988,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results @@ -6013,7 +6013,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -6038,7 +6038,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, 
¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -6062,7 +6062,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results @@ -6086,7 +6086,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results @@ -6111,7 +6111,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -6136,7 +6136,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) 
[]string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -6161,7 +6161,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -6185,7 +6185,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results @@ -6209,7 +6209,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results @@ -6237,7 +6237,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := 
ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6270,7 +6270,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6302,7 +6302,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6335,7 +6335,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if 
!current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6368,7 +6368,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -6401,7 +6401,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -6433,7 +6433,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6465,7 +6465,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6497,7 +6497,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6529,7 +6529,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6563,7 +6563,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6593,7 +6593,7 @@ 
func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -6622,7 +6622,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6655,7 +6655,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6688,7 +6688,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6722,7 +6722,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6752,7 +6752,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -6781,7 +6781,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6813,7 +6813,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok 
{ return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -6846,7 +6846,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -6874,7 +6874,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results @@ -6898,7 +6898,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSGroup }) 
ctx.StringCache[field] = results @@ -6922,7 +6922,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results @@ -6946,7 +6946,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results @@ -6970,7 +6970,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results @@ -6994,7 +6994,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results @@ -7022,7 +7022,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7055,7 +7055,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7087,7 +7087,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7120,7 +7120,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return 
result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7153,7 +7153,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -7186,7 +7186,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -7218,7 +7218,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if 
!current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7250,7 +7250,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7282,7 +7282,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7314,7 +7314,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7348,7 +7348,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, 
ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7378,7 +7378,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -7407,7 +7407,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7440,7 +7440,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7473,7 +7473,7 
@@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7507,7 +7507,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7537,7 +7537,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -7566,7 +7566,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7598,7 +7598,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -7631,7 +7631,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -7659,7 +7659,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results @@ -7683,7 +7683,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := 
newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results @@ -7708,7 +7708,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessIsThread(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -7742,7 +7742,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -7766,7 +7766,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -7790,7 +7790,7 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results @@ -7814,7 +7814,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results @@ -7838,7 +7838,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results @@ -7862,7 +7862,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return 
current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results @@ -7887,7 +7887,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveK8SGroups(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -7912,7 +7912,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUID(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -7937,7 +7937,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUsername(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -10238,7 +10238,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
"PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10263,7 +10263,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10288,7 +10288,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10313,7 +10313,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -10338,7 +10338,7 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10363,7 +10363,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10387,7 +10387,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results @@ -10411,7 +10411,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return 
int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results @@ -10435,7 +10435,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results @@ -10459,7 +10459,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results @@ -10483,7 +10483,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results @@ -10508,7 +10508,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, 
"PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -10533,7 +10533,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -10558,7 +10558,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results @@ -10582,7 +10582,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results @@ -10607,7 +10607,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
"PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10632,7 +10632,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -10656,7 +10656,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results @@ -10680,7 +10680,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results @@ -10705,7 +10705,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, 
ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10730,7 +10730,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -10755,7 +10755,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -10779,7 +10779,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EUID) }) 
ctx.IntCache[field] = results @@ -10803,7 +10803,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results @@ -10831,7 +10831,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -10864,7 +10864,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -10896,7 +10896,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current 
*ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -10929,7 +10929,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -10962,7 +10962,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -10995,7 +10995,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -11027,7 +11027,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev 
*Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11059,7 +11059,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11091,7 +11091,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11123,7 +11123,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11157,7 +11157,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { 
return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11187,7 +11187,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -11216,7 +11216,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11249,7 +11249,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11282,7 
+11282,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11316,7 +11316,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11346,7 +11346,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -11375,7 +11375,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if 
!current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11407,7 +11407,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11440,7 +11440,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11468,7 +11468,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results @@ -11492,7 +11492,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := 
newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results @@ -11516,7 +11516,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results @@ -11540,7 +11540,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results @@ -11564,7 +11564,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results @@ -11588,7 +11588,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev 
*Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results @@ -11616,7 +11616,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11649,7 +11649,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11681,7 +11681,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11714,7 +11714,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { 
return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11747,7 +11747,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -11780,7 +11780,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -11812,7 +11812,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ 
-11844,7 +11844,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11876,7 +11876,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11908,7 +11908,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -11942,7 +11942,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if 
!current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -11972,7 +11972,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -12001,7 +12001,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12034,7 +12034,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12067,7 +12067,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current 
*ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12101,7 +12101,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12131,7 +12131,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -12160,7 +12160,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -12192,7 +12192,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := 
ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -12225,7 +12225,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -12253,7 +12253,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results @@ -12277,7 +12277,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results @@ -12302,7 +12302,7 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessIsThread(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -12336,7 +12336,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -12360,7 +12360,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -12384,7 +12384,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results @@ 
-12408,7 +12408,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results @@ -12432,7 +12432,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results @@ -12456,7 +12456,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results @@ -12481,7 +12481,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return 
ev.FieldHandlers.ResolveK8SGroups(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -12506,7 +12506,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUID(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -12531,7 +12531,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUsername(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -16122,7 +16122,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16147,7 +16147,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, 
current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16172,7 +16172,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16197,7 +16197,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -16222,7 +16222,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16247,7 +16247,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16271,7 +16271,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results @@ -16295,7 +16295,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results @@ -16319,7 +16319,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results @@ 
-16343,7 +16343,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results @@ -16367,7 +16367,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results @@ -16392,7 +16392,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -16417,7 +16417,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return 
ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results @@ -16442,7 +16442,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results @@ -16466,7 +16466,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results @@ -16491,7 +16491,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16516,7 +16516,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := 
newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -16540,7 +16540,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results @@ -16564,7 +16564,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results @@ -16589,7 +16589,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16614,7 +16614,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := 
newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -16639,7 +16639,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -16663,7 +16663,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results @@ -16687,7 +16687,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results @@ -16715,7 +16715,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16748,7 +16748,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -16780,7 +16780,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16813,7 +16813,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" @@ -16846,7 +16846,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -16879,7 +16879,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -16911,7 +16911,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16943,7 +16943,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, 
"Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -16975,7 +16975,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17007,7 +17007,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17041,7 +17041,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17071,7 +17071,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
"Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -17100,7 +17100,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17133,7 +17133,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17166,7 +17166,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17200,7 +17200,7 @@ func (_ *Model) GetEvaluator(field eval.Field, 
regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17230,7 +17230,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -17259,7 +17259,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17291,7 +17291,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = 
&eval.ErrNotSupported{Field: field} return 0 @@ -17324,7 +17324,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17352,7 +17352,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results @@ -17376,7 +17376,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results @@ -17400,7 +17400,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current 
*ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results @@ -17424,7 +17424,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results @@ -17448,7 +17448,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results @@ -17472,7 +17472,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results @@ -17500,7 +17500,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, 
"Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17533,7 +17533,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17565,7 +17565,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17598,7 +17598,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17631,7 +17631,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := 
newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return nil @@ -17664,7 +17664,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return false @@ -17696,7 +17696,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17728,7 +17728,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17760,7 +17760,7 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17792,7 +17792,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -17826,7 +17826,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17856,7 +17856,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return 
len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -17885,7 +17885,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17918,7 +17918,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17951,7 +17951,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -17985,7 +17985,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current 
*ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -18015,7 +18015,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results @@ -18044,7 +18044,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -18076,7 +18076,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return 0 @@ -18109,7 +18109,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := 
ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { ctx.Error = &eval.ErrNotSupported{Field: field} return "" @@ -18137,7 +18137,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results @@ -18161,7 +18161,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results @@ -18186,7 +18186,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.BoolCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { return ev.FieldHandlers.ResolveProcessIsThread(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results @@ -18220,7 +18220,7 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -18244,7 +18244,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -18268,7 +18268,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results @@ -18292,7 +18292,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results @@ -18316,7 +18316,7 @@ 
func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results @@ -18340,7 +18340,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results @@ -18365,7 +18365,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveK8SGroups(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -18390,7 +18390,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return 
ev.FieldHandlers.ResolveK8SUID(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results @@ -18415,7 +18415,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveK8SUsername(ev, ¤t.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results diff --git a/pkg/security/secl/model/accessors_windows.go b/pkg/security/secl/model/accessors_windows.go index eb63650b480ebb..10f7a420f9af91 100644 --- a/pkg/security/secl/model/accessors_windows.go +++ b/pkg/security/secl/model/accessors_windows.go @@ -884,7 +884,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessCmdLine(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -908,7 +908,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.ContainerID }) ctx.StringCache[field] = results @@ -933,7 
+933,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -958,7 +958,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -983,7 +983,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1009,7 +1009,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := 
newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1035,7 +1035,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1061,7 +1061,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1087,7 +1087,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1121,7 +1121,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -1145,7 +1145,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -1170,7 +1170,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveUser(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1194,7 +1194,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return 
current.ProcessContext.Process.OwnerSidString }) ctx.StringCache[field] = results diff --git a/pkg/security/secl/model/string_array_iter.go b/pkg/security/secl/model/iterator.go similarity index 71% rename from pkg/security/secl/model/string_array_iter.go rename to pkg/security/secl/model/iterator.go index b2bfc5ae9d99d9..34dd77752ccf0f 100644 --- a/pkg/security/secl/model/string_array_iter.go +++ b/pkg/security/secl/model/iterator.go @@ -8,8 +8,8 @@ package model import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" -// AncestorsIterator is a generic interface that iterators must implement -type AncestorsIterator[T any] interface { +// Iterator is a generic interface that iterators must implement +type Iterator[T any] interface { Front(ctx *eval.Context) T Next(ctx *eval.Context) T At(ctx *eval.Context, regID eval.RegisterID, pos int) T @@ -22,7 +22,7 @@ func isNil[V comparable](v V) bool { return v == zero } -func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { +func newIterator[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { results = append(results, perIter(ev, entry)) @@ -32,7 +32,7 @@ func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field return results } -func newAncestorsIteratorArray[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { +func newIteratorArray[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) count := 0 for entry := iter.Front(ctx); !isNil(entry); entry = 
iter.Next(ctx) { diff --git a/pkg/security/seclwin/model/accessors_win.go b/pkg/security/seclwin/model/accessors_win.go index 38084deace9826..72d3c6801658e9 100644 --- a/pkg/security/seclwin/model/accessors_win.go +++ b/pkg/security/seclwin/model/accessors_win.go @@ -882,7 +882,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveProcessCmdLine(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -906,7 +906,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.ContainerID }) ctx.StringCache[field] = results @@ -931,7 +931,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results @@ -956,7 +956,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if 
result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -981,7 +981,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1007,7 +1007,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1033,7 +1033,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current 
*ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1059,7 +1059,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results @@ -1085,7 +1085,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results @@ -1119,7 +1119,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results @@ -1143,7 +1143,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.IntCache[field]; ok { return result } - results := newAncestorsIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results @@ -1168,7 +1168,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { return ev.FieldHandlers.ResolveUser(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results @@ -1192,7 +1192,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval if result, ok := ctx.StringCache[field]; ok { return result } - results := newAncestorsIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { return current.ProcessContext.Process.OwnerSidString }) ctx.StringCache[field] = results diff --git a/pkg/security/seclwin/model/string_array_iter.go b/pkg/security/seclwin/model/iterator.go similarity index 71% rename from pkg/security/seclwin/model/string_array_iter.go rename to pkg/security/seclwin/model/iterator.go index b2bfc5ae9d99d9..34dd77752ccf0f 100644 --- a/pkg/security/seclwin/model/string_array_iter.go +++ b/pkg/security/seclwin/model/iterator.go @@ -8,8 +8,8 @@ package model import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" -// AncestorsIterator is a generic interface that iterators must implement -type AncestorsIterator[T any] interface { +// Iterator 
is a generic interface that iterators must implement +type Iterator[T any] interface { Front(ctx *eval.Context) T Next(ctx *eval.Context) T At(ctx *eval.Context, regID eval.RegisterID, pos int) T @@ -22,7 +22,7 @@ func isNil[V comparable](v V) bool { return v == zero } -func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { +func newIterator[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { results = append(results, perIter(ev, entry)) @@ -32,7 +32,7 @@ func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field return results } -func newAncestorsIteratorArray[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { +func newIteratorArray[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) count := 0 for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { diff --git a/tasks/security_agent.py b/tasks/security_agent.py index 5f3619747e22c2..40b7f9aed9c13f 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -815,7 +815,7 @@ def sync_secl_win_pkg(ctx): ("accessors_windows.go", "accessors_win.go"), ("legacy_secl.go", None), ("security_profile.go", None), - ("string_array_iter.go", None), + ("iterator.go", None), ] ctx.run("rm -r pkg/security/seclwin/model") From 290cafeafaa1ac20f96f43c07614346a15310ce2 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Tue, 28 Jan 2025 15:06:19 +0100 Subject: [PATCH 17/97] fix(renovate): deactivate the default configuration (#33470) --- renovate.json | 
3 --- 1 file changed, 3 deletions(-) diff --git a/renovate.json b/renovate.json index ff1f0dc60d2fa0..f7bbfee23161e6 100644 --- a/renovate.json +++ b/renovate.json @@ -1,8 +1,5 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "extends": [ - "config:recommended" - ], "customManagers" : [ { "customType": "regex", From 481e505640ef36fbcad9e2c8c60057363ef3a3f8 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 28 Jan 2025 15:30:45 +0100 Subject: [PATCH 18/97] [CWS/CSPM] remove unneeded build tags from security agent (#33427) --- .../subcommands/check/command.go | 26 ++--------------- .../subcommands/check/command_unsupported.go | 2 +- .../check/resolver_cluster_agent.go | 28 +++++++++++++++++++ .../check/resolver_security_agent.go | 15 ++++++++++ .../subcommands/compliance/command_test.go | 10 +++++-- tasks/build_tags.py | 5 ---- 6 files changed, 54 insertions(+), 32 deletions(-) create mode 100644 cmd/security-agent/subcommands/check/resolver_cluster_agent.go create mode 100644 cmd/security-agent/subcommands/check/resolver_security_agent.go diff --git a/cmd/security-agent/subcommands/check/command.go b/cmd/security-agent/subcommands/check/command.go index 78cd3b1cbefefc..8074677ddfcde7 100644 --- a/cmd/security-agent/subcommands/check/command.go +++ b/cmd/security-agent/subcommands/check/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//go:build !windows && kubeapiserver +//go:build !windows // Package check holds check related files package check @@ -14,12 +14,9 @@ import ( "fmt" "os" "path/filepath" - "time" "github.com/spf13/cobra" "go.uber.org/fx" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" ddgostatsd "github.com/DataDog/datadog-go/v5/statsd" @@ -37,10 +34,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" - "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" ) // CliParams needs to be exported because the compliance subcommand is tightly coupled to this subcommand and tests need to be able to access this type. @@ -140,20 +135,13 @@ func RunCheck(log log.Component, config config.Component, _ secrets.Component, s var resolver compliance.Resolver if checkArgs.overrideRegoInput != "" { resolver = newFakeResolver(checkArgs.overrideRegoInput) - } else if flavor.GetFlavor() == flavor.ClusterAgent { - resolver = compliance.NewResolver(context.Background(), compliance.ResolverOptions{ - Hostname: hname, - DockerProvider: compliance.DefaultDockerProvider, - LinuxAuditProvider: compliance.DefaultLinuxAuditProvider, - KubernetesProvider: complianceKubernetesProvider, - StatsdClient: statsdClient, - }) } else { resolver = compliance.NewResolver(context.Background(), compliance.ResolverOptions{ Hostname: hname, HostRoot: os.Getenv("HOST_ROOT"), DockerProvider: compliance.DefaultDockerProvider, LinuxAuditProvider: compliance.DefaultLinuxAuditProvider, + KubernetesProvider: complianceKubernetesProvider, StatsdClient: statsdClient, }) } @@ -261,16 +249,6 @@ func reportComplianceEvents(log log.Component, events []*compliance.CheckEvent, return nil } -func 
complianceKubernetesProvider(_ctx context.Context) (dynamic.Interface, discovery.DiscoveryInterface, error) { - ctx, cancel := context.WithTimeout(_ctx, 2*time.Second) - defer cancel() - apiCl, err := apiserver.WaitForAPIClient(ctx) - if err != nil { - return nil, nil, err - } - return apiCl.DynamicCl, apiCl.Cl.Discovery(), nil -} - type fakeResolver struct { regoInputPath string } diff --git a/cmd/security-agent/subcommands/check/command_unsupported.go b/cmd/security-agent/subcommands/check/command_unsupported.go index 0126e53f15ce6f..190a54b1ce256f 100644 --- a/cmd/security-agent/subcommands/check/command_unsupported.go +++ b/cmd/security-agent/subcommands/check/command_unsupported.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build windows || !kubeapiserver +//go:build windows // Package check holds check related files package check diff --git a/cmd/security-agent/subcommands/check/resolver_cluster_agent.go b/cmd/security-agent/subcommands/check/resolver_cluster_agent.go new file mode 100644 index 00000000000000..fc5ae6cdbc061b --- /dev/null +++ b/cmd/security-agent/subcommands/check/resolver_cluster_agent.go @@ -0,0 +1,28 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !windows && kubeapiserver + +// Package check holds check related files +package check + +import ( + "context" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" +) + +func complianceKubernetesProvider(_ctx context.Context) (dynamic.Interface, discovery.DiscoveryInterface, error) { + ctx, cancel := context.WithTimeout(_ctx, 2*time.Second) + defer cancel() + apiCl, err := apiserver.WaitForAPIClient(ctx) + if err != nil { + return nil, nil, err + } + return apiCl.DynamicCl, apiCl.Cl.Discovery(), nil +} diff --git a/cmd/security-agent/subcommands/check/resolver_security_agent.go b/cmd/security-agent/subcommands/check/resolver_security_agent.go new file mode 100644 index 00000000000000..758b99b7d5d64b --- /dev/null +++ b/cmd/security-agent/subcommands/check/resolver_security_agent.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !windows && !kubeapiserver + +// Package check holds check related files +package check + +import ( + "github.com/DataDog/datadog-agent/pkg/compliance" +) + +var complianceKubernetesProvider compliance.KubernetesProvider diff --git a/cmd/security-agent/subcommands/compliance/command_test.go b/cmd/security-agent/subcommands/compliance/command_test.go index 4df080dbef579e..cced3dc43986a6 100644 --- a/cmd/security-agent/subcommands/compliance/command_test.go +++ b/cmd/security-agent/subcommands/compliance/command_test.go @@ -8,9 +8,11 @@ package compliance import ( - "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "runtime" "testing" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/cmd/security-agent/command" @@ -45,7 +47,11 @@ func TestEventCommands(t *testing.T) { subcommandNames = append(subcommandNames, subcommand.Use) } - require.Equal(t, []string{"event", "load "}, subcommandNames, "subcommand missing") + if runtime.GOOS == "windows" { + require.Equal(t, []string{"event", "load "}, subcommandNames, "subcommand missing") + } else { + require.Equal(t, []string{"check", "event", "load "}, subcommandNames, "subcommand missing") + } fxutil.TestOneShotSubcommand(t, Commands(&command.GlobalParams{}), diff --git a/tasks/build_tags.py b/tasks/build_tags.py index 665a54518fde8e..8fcd60cd99fbc2 100644 --- a/tasks/build_tags.py +++ b/tasks/build_tags.py @@ -151,11 +151,6 @@ "netcgo", "datadog.no_waf", "docker", - "containerd", - "no_dynamic_plugins", - "kubeapiserver", - "kubelet", - "podman", "zlib", "zstd", "ec2", From b9eb4a22c1990372956900b57002710dfbe89d89 Mon Sep 17 00:00:00 2001 From: val06 Date: Tue, 28 Jan 2025 16:36:14 +0200 Subject: [PATCH 19/97] [GPU] removed redundant internal telemetry counter (#33464) --- pkg/collector/corechecks/gpu/gpu.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/pkg/collector/corechecks/gpu/gpu.go 
b/pkg/collector/corechecks/gpu/gpu.go index aa794d88d8a86d..a971f15304e47e 100644 --- a/pkg/collector/corechecks/gpu/gpu.go +++ b/pkg/collector/corechecks/gpu/gpu.go @@ -54,7 +54,6 @@ type Check struct { type checkTelemetry struct { nvmlMetricsSent telemetry.Counter collectorErrors telemetry.Counter - sysprobeChecks telemetry.Counter activeMetrics telemetry.Gauge sysprobeMetricsSent telemetry.Counter } @@ -77,13 +76,11 @@ func newCheck(tagger tagger.Component, telemetry telemetry.Component) check.Chec } func newCheckTelemetry(tm telemetry.Component) *checkTelemetry { - subsystem := CheckName return &checkTelemetry{ - nvmlMetricsSent: tm.NewCounter(subsystem, "nvml_metrics_sent", []string{"collector"}, "Number of NVML metrics sent"), - collectorErrors: tm.NewCounter(subsystem, "collector_errors", []string{"collector"}, "Number of errors from NVML collectors"), - sysprobeChecks: tm.NewCounter(subsystem, "sysprobe_checks", []string{"status"}, "Number of sysprobe checks, by status"), - activeMetrics: tm.NewGauge(subsystem, "active_metrics", nil, "Number of active metrics"), - sysprobeMetricsSent: tm.NewCounter(subsystem, "sysprobe_metrics_sent", nil, "Number of metrics sent based on system probe data"), + nvmlMetricsSent: tm.NewCounter(CheckName, "nvml_metrics_sent", []string{"collector"}, "Number of NVML metrics sent"), + collectorErrors: tm.NewCounter(CheckName, "collector_errors", []string{"collector"}, "Number of errors from NVML collectors"), + activeMetrics: tm.NewGauge(CheckName, "active_metrics", nil, "Number of active metrics"), + sysprobeMetricsSent: tm.NewCounter(CheckName, "sysprobe_metrics_sent", nil, "Number of metrics sent based on system probe data"), } } @@ -148,10 +145,8 @@ func (c *Check) Run() error { func (c *Check) emitSysprobeMetrics(snd sender.Sender) error { stats, err := sysprobeclient.GetCheck[model.GPUStats](c.sysProbeClient, sysconfig.GPUMonitoringModule) if err != nil { - c.telemetry.sysprobeChecks.Add(1, "error") return fmt.Errorf("cannot 
get data from system-probe: %w", err) } - c.telemetry.sysprobeChecks.Add(1, "success") // Set all metrics to inactive, so we can remove the ones that we don't see // and send the final metrics From f562fb568001fc82117bb6f561d73bca32a45465 Mon Sep 17 00:00:00 2001 From: Andrew Glaude Date: Tue, 28 Jan 2025 10:40:39 -0500 Subject: [PATCH 20/97] APM: Reduce number of allocs from sampler metrics (#33417) --- pkg/trace/sampler/metrics.go | 33 +++++++++++++---------- pkg/trace/sampler/prioritysampler_test.go | 13 ++++----- pkg/trace/sampler/sampler.go | 14 +++++----- 3 files changed, 32 insertions(+), 28 deletions(-) diff --git a/pkg/trace/sampler/metrics.go b/pkg/trace/sampler/metrics.go index 76425ab80e0d11..937e1023c837c7 100644 --- a/pkg/trace/sampler/metrics.go +++ b/pkg/trace/sampler/metrics.go @@ -24,28 +24,33 @@ type metrics struct { value map[metricsKey]metricsValue } -type metricsKey [3]string +type metricsKey struct { + targetService string + targetEnv string + samplingPriority string +} func newMetricsKey(service, env string, samplingPriority *SamplingPriority) metricsKey { - var key metricsKey - if service != "" { - key[0] = "target_service:" + service - } - if env != "" { - key[1] = "target_env:" + env + mk := metricsKey{ + targetService: service, + targetEnv: env, } if samplingPriority != nil { - key[2] = samplingPriority.tag() + mk.samplingPriority = samplingPriority.tagValue() } - return key + return mk } func (k metricsKey) tags() []string { - tags := make([]string, 0, len(k)) - for _, v := range k { - if v != "" { - tags = append(tags, v) - } + tags := make([]string, 0, 3) // Pre-allocate number of fields for efficiency + if k.targetService != "" { + tags = append(tags, "target_service:"+k.targetService) + } + if k.targetEnv != "" { + tags = append(tags, "target_env:"+k.targetEnv) + } + if k.samplingPriority != "" { + tags = append(tags, "sampling_priority:"+k.samplingPriority) } return tags } diff --git a/pkg/trace/sampler/prioritysampler_test.go 
b/pkg/trace/sampler/prioritysampler_test.go index f80d0a854ff975..21b2b7820896e7 100644 --- a/pkg/trace/sampler/prioritysampler_test.go +++ b/pkg/trace/sampler/prioritysampler_test.go @@ -10,13 +10,14 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/atomic" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-go/v5/statsd" mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "go.uber.org/atomic" ) func randomTraceID() uint64 { @@ -89,7 +90,7 @@ func TestPrioritySample(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.priority.tag(), func(t *testing.T) { + t.Run(tt.priority.tagValue(), func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() statsdClient := mockStatsd.NewMockClientInterface(ctrl) @@ -107,7 +108,7 @@ func TestPrioritySample(t *testing.T) { if tt.priority == PriorityNone { expectedTagsA = append(expectedTagsA, "sampling_priority:auto_drop") } else { - expectedTagsA = append(expectedTagsA, tt.priority.tag()) + expectedTagsA = append(expectedTagsA, "sampling_priority:"+tt.priority.tagValue()) } chunkA, rootA := getTestTraceWithService("service-a", s) chunkA.Priority = int32(tt.priority) @@ -121,7 +122,7 @@ func TestPrioritySample(t *testing.T) { if tt.priority == PriorityNone { expectedTagsB = append(expectedTagsB, "sampling_priority:auto_drop") } else { - expectedTagsB = append(expectedTagsB, tt.priority.tag()) + expectedTagsB = append(expectedTagsB, "sampling_priority:"+tt.priority.tagValue()) } chunkB, rootB := getTestTraceWithService("service-b", s) chunkB.Priority = int32(tt.priority) diff --git a/pkg/trace/sampler/sampler.go b/pkg/trace/sampler/sampler.go index a9fb50257f3e68..19f99314752dff 100644 --- a/pkg/trace/sampler/sampler.go +++ b/pkg/trace/sampler/sampler.go @@ -68,21 +68,19 @@ 
const ( samplerHasher = uint64(1111111111111111111) ) -func (s SamplingPriority) tag() string { - var v string +func (s SamplingPriority) tagValue() string { switch s { case PriorityUserDrop: - v = "manual_drop" + return "manual_drop" case PriorityAutoDrop: - v = "auto_drop" + return "auto_drop" case PriorityAutoKeep: - v = "auto_keep" + return "auto_keep" case PriorityUserKeep: - v = "manual_keep" + return "manual_keep" default: - v = "none" + return "none" } - return "sampling_priority:" + v } // SampleByRate returns whether to keep a trace, based on its ID and a sampling rate. From 1cb455d08df488f5d5ecd62e14c7b617746051f8 Mon Sep 17 00:00:00 2001 From: Baptiste Foy Date: Tue, 28 Jan 2025 16:42:37 +0100 Subject: [PATCH 21/97] fix(installer): Synchronously terminate the installer experiment (#33459) --- .../installer/packages/embedded/datadog-installer-exp.service | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/fleet/installer/packages/embedded/datadog-installer-exp.service b/pkg/fleet/installer/packages/embedded/datadog-installer-exp.service index 6c37f99622e04b..81f57329ff2def 100644 --- a/pkg/fleet/installer/packages/embedded/datadog-installer-exp.service +++ b/pkg/fleet/installer/packages/embedded/datadog-installer-exp.service @@ -10,6 +10,7 @@ Type=oneshot PIDFile=/opt/datadog-packages/run/installer-exp.pid ExecStart=/opt/datadog-packages/datadog-installer/experiment/bin/installer/installer run -p /opt/datadog-packages/run/installer-exp.pid ExecStart=/bin/false +ExecStop=/usr/bin/tail --pid $MAINPID -f /dev/null ExecStop=/bin/false [Install] From 7ac9ccd58c07664c076ba08bd45cf4a342cb0c40 Mon Sep 17 00:00:00 2001 From: Stuart Geipel Date: Tue, 28 Jan 2025 11:00:50 -0500 Subject: [PATCH 22/97] [NPM-4131] Use module for network path payload (#33336) --- go.mod | 4 ++++ go.work | 2 ++ modules.yml | 2 ++ pkg/network/event_common.go | 9 +++------ pkg/network/payload/go.mod | 3 +++ pkg/network/payload/types.go | 17 +++++++++++++++++ 
pkg/networkpath/payload/go.mod | 10 ++++++++++ pkg/networkpath/payload/go.sum | 2 ++ pkg/networkpath/payload/pathevent.go | 4 ++-- 9 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 pkg/network/payload/go.mod create mode 100644 pkg/network/payload/types.go create mode 100644 pkg/networkpath/payload/go.mod create mode 100644 pkg/networkpath/payload/go.sum diff --git a/go.mod b/go.mod index 774f2c188374cf..8c8e11265e4594 100644 --- a/go.mod +++ b/go.mod @@ -102,7 +102,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/logs/status/utils => ./pkg/logs/status/utils github.com/DataDog/datadog-agent/pkg/logs/util/testutils => ./pkg/logs/util/testutils github.com/DataDog/datadog-agent/pkg/metrics => ./pkg/metrics/ + github.com/DataDog/datadog-agent/pkg/network/payload => ./pkg/network/payload github.com/DataDog/datadog-agent/pkg/networkdevice/profile => ./pkg/networkdevice/profile + github.com/DataDog/datadog-agent/pkg/networkpath/payload => ./pkg/networkpath/payload github.com/DataDog/datadog-agent/pkg/obfuscate => ./pkg/obfuscate github.com/DataDog/datadog-agent/pkg/orchestrator/model => ./pkg/orchestrator/model github.com/DataDog/datadog-agent/pkg/process/util/api => ./pkg/process/util/api @@ -575,6 +577,8 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.64.0-devel github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0 github.com/DataDog/datadog-agent/pkg/fips v0.0.0 // indirect + github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/networkpath/payload v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.64.0-devel github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.11 diff --git a/go.work b/go.work index b119a5ef5eb4ab..8d0446c3cbaf38 100644 --- a/go.work +++ b/go.work @@ -91,7 +91,9 @@ use ( pkg/logs/status/utils 
pkg/logs/util/testutils pkg/metrics + pkg/network/payload pkg/networkdevice/profile + pkg/networkpath/payload pkg/obfuscate pkg/orchestrator/model pkg/process/util/api diff --git a/modules.yml b/modules.yml index 65242341207d63..6a20af65d1bf10 100644 --- a/modules.yml +++ b/modules.yml @@ -188,7 +188,9 @@ modules: used_by_otel: true pkg/metrics: used_by_otel: true + pkg/network/payload: default pkg/networkdevice/profile: default + pkg/networkpath/payload: default pkg/obfuscate: used_by_otel: true pkg/orchestrator/model: diff --git a/pkg/network/event_common.go b/pkg/network/event_common.go index 8a268e4508d2b3..246fb5a863e64b 100644 --- a/pkg/network/event_common.go +++ b/pkg/network/event_common.go @@ -18,6 +18,7 @@ import ( "go4.org/intern" "github.com/DataDog/datadog-agent/pkg/network/dns" + networkpayload "github.com/DataDog/datadog-agent/pkg/network/payload" "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/network/protocols/kafka" @@ -298,14 +299,10 @@ type ConnectionStats struct { } // Via has info about the routing decision for a flow -type Via struct { - Subnet Subnet `json:"subnet,omitempty"` -} +type Via = networkpayload.Via // Subnet stores info about a subnet -type Subnet struct { - Alias string `json:"alias,omitempty"` -} +type Subnet = networkpayload.Subnet // IPTranslation can be associated with a connection to show the connection is NAT'd type IPTranslation struct { diff --git a/pkg/network/payload/go.mod b/pkg/network/payload/go.mod new file mode 100644 index 00000000000000..eb835098c4be05 --- /dev/null +++ b/pkg/network/payload/go.mod @@ -0,0 +1,3 @@ +module github.com/DataDog/datadog-agent/pkg/network/payload + +go 1.23.0 diff --git a/pkg/network/payload/types.go b/pkg/network/payload/types.go new file mode 100644 index 00000000000000..34931f5952b281 --- /dev/null +++ b/pkg/network/payload/types.go @@ -0,0 +1,17 @@ +// Unless explicitly 
stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025-present Datadog, Inc. + +// Package payload separates network types used as JSON payloads into a module +package payload + +// Via has info about the routing decision for a flow +type Via struct { + Subnet Subnet `json:"subnet,omitempty"` +} + +// Subnet stores info about a subnet +type Subnet struct { + Alias string `json:"alias,omitempty"` +} diff --git a/pkg/networkpath/payload/go.mod b/pkg/networkpath/payload/go.mod new file mode 100644 index 00000000000000..d0e9b4de48c4d2 --- /dev/null +++ b/pkg/networkpath/payload/go.mod @@ -0,0 +1,10 @@ +module github.com/DataDog/datadog-agent/pkg/networkpath/payload + +go 1.23.0 + +replace github.com/DataDog/datadog-agent/pkg/network/payload => ../../network/payload + +require ( + github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-00010101000000-000000000000 + github.com/google/uuid v1.6.0 +) diff --git a/pkg/networkpath/payload/go.sum b/pkg/networkpath/payload/go.sum new file mode 100644 index 00000000000000..7790d7c3e03900 --- /dev/null +++ b/pkg/networkpath/payload/go.sum @@ -0,0 +1,2 @@ +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/pkg/networkpath/payload/pathevent.go b/pkg/networkpath/payload/pathevent.go index 121461c10a7398..f85e70f7f458b1 100644 --- a/pkg/networkpath/payload/pathevent.go +++ b/pkg/networkpath/payload/pathevent.go @@ -6,7 +6,7 @@ // Package payload contains Network Path payload package payload -import "github.com/DataDog/datadog-agent/pkg/network" +import "github.com/DataDog/datadog-agent/pkg/network/payload" // Protocol defines supported network protocols // Please define new protocols based on the Keyword from: @@ -48,7 +48,7 @@ type NetworkPathHop struct { 
// about the source of a path type NetworkPathSource struct { Hostname string `json:"hostname"` - Via *network.Via `json:"via,omitempty"` + Via *payload.Via `json:"via,omitempty"` NetworkID string `json:"network_id,omitempty"` // Today this will be a VPC ID since we only resolve AWS resources Service string `json:"service,omitempty"` ContainerID string `json:"container_id,omitempty"` From 541cc4f57c0c8b933f00fbd16a6116e36bae54bf Mon Sep 17 00:00:00 2001 From: Andrew Glaude Date: Tue, 28 Jan 2025 11:04:50 -0500 Subject: [PATCH 23/97] APM: fix big pid num formatting bug (APMSP-1755) (#33426) --- pkg/trace/info/info.go | 5 +++-- pkg/trace/info/testdata/okay.json | 2 +- pkg/trace/info/testdata/psp.json | 2 +- pkg/trace/info/testdata/warning.json | 2 +- .../notes/traceagent-pidnum-0cc687e28addae67.yaml | 11 +++++++++++ 5 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 releasenotes/notes/traceagent-pidnum-0cc687e28addae67.yaml diff --git a/pkg/trace/info/info.go b/pkg/trace/info/info.go index df86c40587fdc1..0012b285416a57 100644 --- a/pkg/trace/info/info.go +++ b/pkg/trace/info/info.go @@ -16,6 +16,7 @@ import ( "os" "regexp" "slices" + "strconv" "strings" "sync" "text/template" @@ -205,7 +206,7 @@ func InitInfo(conf *config.AgentConfig) error { // automatically ignore extra fields. 
type StatusInfo struct { CmdLine []string `json:"cmdline"` - Pid int `json:"pid"` + Pid string `json:"pid"` Uptime int `json:"uptime"` MemStats struct { Alloc uint64 @@ -327,7 +328,7 @@ func initInfo(conf *config.AgentConfig) error { return fmt.Sprintf("%02.1f", v*100) }, } - expvar.NewInt("pid").Set(int64(os.Getpid())) + expvar.NewString("pid").Set(strconv.Itoa(os.Getpid())) expvar.Publish("uptime", expvar.Func(publishUptime)) expvar.Publish("version", expvar.Func(publishVersion)) expvar.Publish("receiver", expvar.Func(publishReceiverStats)) diff --git a/pkg/trace/info/testdata/okay.json b/pkg/trace/info/testdata/okay.json index e085c689a95227..871e8bf240b408 100644 --- a/pkg/trace/info/testdata/okay.json +++ b/pkg/trace/info/testdata/okay.json @@ -4,7 +4,7 @@ "trace_writer": {"Payloads":4,"Bytes":3245,"Traces":26,"Events":123,"Errors":0}, "stats_writer": {"Payloads":6,"Bytes":8329,"StatsBuckets":12,"Errors":0}, "memstats": {"Alloc":773552,"TotalAlloc":773552,"Sys":3346432,"Lookups":6,"Mallocs":7231,"Frees":561,"HeapAlloc":773552,"HeapSys":1572864,"HeapIdle":49152,"HeapInuse":1523712,"HeapReleased":0,"HeapObjects":6670,"StackInuse":524288,"StackSys":524288,"MSpanInuse":24480,"MSpanSys":32768,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":2675,"GCSys":131072,"OtherSys":1066381,"NextGC":4194304,"LastGC":0,"PauseTotalNs":0,"PauseNs":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":0,"GCCPUFraction":0,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":126,"Frees":0},{"Size":16,"Mallocs":825,"Frees":0},{"Size":32,"Mallocs":4208,"Frees":0},{"Size":48,"Mallocs":345,"Frees":0},{"Size":64,"Mallocs":262,"Frees":0},{"Size":80,"Mallocs":93,"Frees":0},{"Size":96,"Mallocs":70,"Frees":0},{"Size":112,"Mallocs":97,"Frees":0},{"Size":128,"Mallocs":24,"Frees":0},{"Size":144,"Mallocs":25,"Frees":0},{"Size":160,"Mallocs":57,"Frees":0},{"Size":176,"Mallocs":128,"Frees":0},{"Size":192,"Mallocs":13,"Frees":0},{"Size":208,"Mallocs":77,"Frees":0},{"Size":224,"Mallocs":3,"Frees":0},{"Size":240,"Mallocs":2,"Frees":0},{"Size":256,"Mallocs":17,"Frees":0},{"Size":288,"Mallocs":64,"Frees":0},{"Size":320,"Mallocs":12,"Frees":0},{"Size":352,"Mallocs":20,"Frees":0},{"Size":384,"Mallocs":1,"Frees":0},{"Size":416,"Mallocs":59,"Frees":0},{"Size":448,"Mallocs":0,"Frees":0},{"Size":480,"Mallocs":3,"Frees":0},{"Size":512,"Mallocs":2,"Frees":0},{"Size":576,"Mallocs":17,"Frees":0},{"Size":640,"Mallocs":6,"Frees":0},{"Size":704,"Mallocs":10,"Frees":0},{"Size":768,"Mallocs":0,"Frees":0},{"Size":896,"Mallocs":11,"Frees":0},{"Size":1024,"Mallocs":11,"Frees":0},{"Size":1152,"Mallocs":12,"Frees":0},{"Size":1280,"Mallocs":2,"Frees":0},{"Size":1408,"Mallocs":2,"Frees":0},{"Size":1536,"Mallocs":0,"Frees":0},{"Size":1664,"Mallocs":10,"Frees":0},{"Size":2048,"Mallocs":17,"Frees":0},{"Size":2304,"Mallocs":7,"Frees":0},{"Size":2560,"Mallocs":1,"Frees":0},{"Size":2816,"Mallocs":1,"Frees":0},{"Size":3072,"Mallocs":1,"Frees":0},{"Size":3328,"Mallocs":7,"Free
s":0},{"Size":4096,"Mallocs":4,"Frees":0},{"Size":4608,"Mallocs":1,"Frees":0},{"Size":5376,"Mallocs":6,"Frees":0},{"Size":6144,"Mallocs":4,"Frees":0},{"Size":6400,"Mallocs":0,"Frees":0},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":0,"Frees":0},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":0},{"Size":9472,"Mallocs":0,"Frees":0},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":1,"Frees":0},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":0,"Frees":0},{"Size":16640,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":1,"Frees":0}]}, - "pid": 38149, + "pid": "38149", "ratebyservice": {"service:,env:":1,"service:myapp,env:dev":0.123,"service:myapp,env:":0.123}, "ratebyservice_filtered": {"service:myapp,env:dev":0.123}, "receiver": [{}], diff --git a/pkg/trace/info/testdata/psp.json b/pkg/trace/info/testdata/psp.json index 532b6c6ba4d6a1..7ea37240e166cc 100644 --- a/pkg/trace/info/testdata/psp.json +++ b/pkg/trace/info/testdata/psp.json @@ -4,7 +4,7 @@ "trace_writer": {"Payloads":4,"Bytes":3245,"Traces":26,"Events":123,"Errors":0}, "stats_writer": {"Payloads":6,"Bytes":8329,"StatsBuckets":12,"Errors":0}, "memstats": 
{"Alloc":773552,"TotalAlloc":773552,"Sys":3346432,"Lookups":6,"Mallocs":7231,"Frees":561,"HeapAlloc":773552,"HeapSys":1572864,"HeapIdle":49152,"HeapInuse":1523712,"HeapReleased":0,"HeapObjects":6670,"StackInuse":524288,"StackSys":524288,"MSpanInuse":24480,"MSpanSys":32768,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":2675,"GCSys":131072,"OtherSys":1066381,"NextGC":4194304,"LastGC":0,"PauseTotalNs":0,"PauseNs":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":0,"GCCPUFraction":0,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":126,"Frees":0},{"Size":16,"Mallocs":825,"Frees":0},{"Size":32,"Mallocs":4208,"Frees":0},{"Size":48,"Mallocs":345,"Frees":0},{"Size":64,"Mallocs":262,"Frees":0},{"Size":80,"Mallocs":93,"Frees":0},{"Size":96,"Mallocs":70,"Frees":0},{"Size":112,"Mallocs":97,"Frees":0},{"Size":128,"Mallocs":24,"Frees":0},{"Size":144,"Mallocs":25,"Frees":0},{"Size":160,"Mallocs":57,"Frees":0},{"Size":176,"Mallocs":128,"Frees":0},{"Size":1
92,"Mallocs":13,"Frees":0},{"Size":208,"Mallocs":77,"Frees":0},{"Size":224,"Mallocs":3,"Frees":0},{"Size":240,"Mallocs":2,"Frees":0},{"Size":256,"Mallocs":17,"Frees":0},{"Size":288,"Mallocs":64,"Frees":0},{"Size":320,"Mallocs":12,"Frees":0},{"Size":352,"Mallocs":20,"Frees":0},{"Size":384,"Mallocs":1,"Frees":0},{"Size":416,"Mallocs":59,"Frees":0},{"Size":448,"Mallocs":0,"Frees":0},{"Size":480,"Mallocs":3,"Frees":0},{"Size":512,"Mallocs":2,"Frees":0},{"Size":576,"Mallocs":17,"Frees":0},{"Size":640,"Mallocs":6,"Frees":0},{"Size":704,"Mallocs":10,"Frees":0},{"Size":768,"Mallocs":0,"Frees":0},{"Size":896,"Mallocs":11,"Frees":0},{"Size":1024,"Mallocs":11,"Frees":0},{"Size":1152,"Mallocs":12,"Frees":0},{"Size":1280,"Mallocs":2,"Frees":0},{"Size":1408,"Mallocs":2,"Frees":0},{"Size":1536,"Mallocs":0,"Frees":0},{"Size":1664,"Mallocs":10,"Frees":0},{"Size":2048,"Mallocs":17,"Frees":0},{"Size":2304,"Mallocs":7,"Frees":0},{"Size":2560,"Mallocs":1,"Frees":0},{"Size":2816,"Mallocs":1,"Frees":0},{"Size":3072,"Mallocs":1,"Frees":0},{"Size":3328,"Mallocs":7,"Frees":0},{"Size":4096,"Mallocs":4,"Frees":0},{"Size":4608,"Mallocs":1,"Frees":0},{"Size":5376,"Mallocs":6,"Frees":0},{"Size":6144,"Mallocs":4,"Frees":0},{"Size":6400,"Mallocs":0,"Frees":0},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":0,"Frees":0},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":0},{"Size":9472,"Mallocs":0,"Frees":0},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":1,"Frees":0},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":0,"Frees":0},{"Size":16640,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":1,"Frees":0}]}, - "pid": 38149, + "pid": "38149", "ratebyservice": {"service:,env:":1,"service:myapp,env:dev":0.123,"service:myapp,env:":0.123}, "ratebyservice_filtered": {"service:myapp,env:dev":0.123}, "receiver": [], diff --git a/pkg/trace/info/testdata/warning.json 
b/pkg/trace/info/testdata/warning.json index ee824781e623f6..e43ffd13243457 100644 --- a/pkg/trace/info/testdata/warning.json +++ b/pkg/trace/info/testdata/warning.json @@ -4,7 +4,7 @@ "trace_writer": {"Payloads":4,"Bytes":3245,"Traces":26,"Errors":3}, "stats_writer": {"Payloads":6,"Bytes":8329,"StatsBuckets":12,"Errors":1}, "memstats": {"Alloc":773552,"TotalAlloc":773552,"Sys":3346432,"Lookups":6,"Mallocs":7231,"Frees":561,"HeapAlloc":773552,"HeapSys":1572864,"HeapIdle":49152,"HeapInuse":1523712,"HeapReleased":0,"HeapObjects":6670,"StackInuse":524288,"StackSys":524288,"MSpanInuse":24480,"MSpanSys":32768,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":2675,"GCSys":131072,"OtherSys":1066381,"NextGC":4194304,"LastGC":0,"PauseTotalNs":0,"PauseNs":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":0,"GCCPUFraction":0,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":126,"Frees":0},{"Size":16,"Mallocs":825,"Frees":0},{"Size":32,"Mallocs":4208,"Fre
es":0},{"Size":48,"Mallocs":345,"Frees":0},{"Size":64,"Mallocs":262,"Frees":0},{"Size":80,"Mallocs":93,"Frees":0},{"Size":96,"Mallocs":70,"Frees":0},{"Size":112,"Mallocs":97,"Frees":0},{"Size":128,"Mallocs":24,"Frees":0},{"Size":144,"Mallocs":25,"Frees":0},{"Size":160,"Mallocs":57,"Frees":0},{"Size":176,"Mallocs":128,"Frees":0},{"Size":192,"Mallocs":13,"Frees":0},{"Size":208,"Mallocs":77,"Frees":0},{"Size":224,"Mallocs":3,"Frees":0},{"Size":240,"Mallocs":2,"Frees":0},{"Size":256,"Mallocs":17,"Frees":0},{"Size":288,"Mallocs":64,"Frees":0},{"Size":320,"Mallocs":12,"Frees":0},{"Size":352,"Mallocs":20,"Frees":0},{"Size":384,"Mallocs":1,"Frees":0},{"Size":416,"Mallocs":59,"Frees":0},{"Size":448,"Mallocs":0,"Frees":0},{"Size":480,"Mallocs":3,"Frees":0},{"Size":512,"Mallocs":2,"Frees":0},{"Size":576,"Mallocs":17,"Frees":0},{"Size":640,"Mallocs":6,"Frees":0},{"Size":704,"Mallocs":10,"Frees":0},{"Size":768,"Mallocs":0,"Frees":0},{"Size":896,"Mallocs":11,"Frees":0},{"Size":1024,"Mallocs":11,"Frees":0},{"Size":1152,"Mallocs":12,"Frees":0},{"Size":1280,"Mallocs":2,"Frees":0},{"Size":1408,"Mallocs":2,"Frees":0},{"Size":1536,"Mallocs":0,"Frees":0},{"Size":1664,"Mallocs":10,"Frees":0},{"Size":2048,"Mallocs":17,"Frees":0},{"Size":2304,"Mallocs":7,"Frees":0},{"Size":2560,"Mallocs":1,"Frees":0},{"Size":2816,"Mallocs":1,"Frees":0},{"Size":3072,"Mallocs":1,"Frees":0},{"Size":3328,"Mallocs":7,"Frees":0},{"Size":4096,"Mallocs":4,"Frees":0},{"Size":4608,"Mallocs":1,"Frees":0},{"Size":5376,"Mallocs":6,"Frees":0},{"Size":6144,"Mallocs":4,"Frees":0},{"Size":6400,"Mallocs":0,"Frees":0},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":0,"Frees":0},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":0},{"Size":9472,"Mallocs":0,"Frees":0},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":1,"Frees":0},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":0,"Frees":0},{"Size":16640
,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":1,"Frees":0}]}, - "pid": 38149, + "pid": "38149", "receiver": [{"Lang":"python","LangVersion":"2.7.6","Interpreter":"CPython","TracerVersion":"0.9.0","TracesReceived":70,"TracesDropped": {"EmptyTrace":3},"SpansMalformed": {"SpanNameEmpty":3, "TypeTruncate": 2},"TracesBytes":10679,"SpansReceived":984,"SpansDropped":184}], "ratelimiter": {"TargetRate":0.421}, "uptime": 15, diff --git a/releasenotes/notes/traceagent-pidnum-0cc687e28addae67.yaml b/releasenotes/notes/traceagent-pidnum-0cc687e28addae67.yaml new file mode 100644 index 00000000000000..b32b86388ca570 --- /dev/null +++ b/releasenotes/notes/traceagent-pidnum-0cc687e28addae67.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + APM: Fix a formatting bug where the trace-agent's PID from "agent status" could be displayed in scientific notation for large PIDs. From 8ad5926f0880a96d9ac9f318b7c535ebc4da22d2 Mon Sep 17 00:00:00 2001 From: val06 Date: Tue, 28 Jan 2025 18:36:40 +0200 Subject: [PATCH 24/97] [EBPF] added Readme to KMT test-runner (#32995) --- test/new-e2e/system-probe/test-runner/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 test/new-e2e/system-probe/test-runner/README.md diff --git a/test/new-e2e/system-probe/test-runner/README.md b/test/new-e2e/system-probe/test-runner/README.md new file mode 100644 index 00000000000000..18b0ef57199923 --- /dev/null +++ b/test/new-e2e/system-probe/test-runner/README.md @@ -0,0 +1,10 @@ +# About +This is a helper utility of the KMT framework. 
+ +As the name suggests, this pkg is used to execute all system-probe UTs **inside** micro-vms. It is compiled on-the-fly by KMT framework. +It is leveraging the [gotestsum](https://github.com/gotestyourself/gotestsum) binary to actually execute the UTs. + +`test-runner` helper is executed as part of [micro-vm-init.sh](../test/micro-vm-init.sh), whereas the script is copied and executed inside each micro-VM via `kmt.test` invoke task. + +The results of the tests (including the junit summary file) are stored in the `/ci-visibility` directory on the target micro-VM. +See `buildTestConfiguration` in main.go of this package for the list of different parameters supported by this test-runner From 0fdf1fafef1872662ae2d837e66e695d83cbf93c Mon Sep 17 00:00:00 2001 From: Jack Phillips Date: Tue, 28 Jan 2025 11:47:46 -0500 Subject: [PATCH 25/97] Throw Error if Not admin (#33447) --- .../windows/DatadogAgentInstallScript/Install-Datadog.ps1 | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 b/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 index 023a11938a1481..8724a7800831b2 100644 --- a/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 +++ b/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 @@ -193,12 +193,7 @@ try { $adminRole = [System.Security.Principal.WindowsBuiltInRole]::Administrator if (-not $myWindowsPrincipal.IsInRole($adminRole)) { # We are not running "as Administrator" - $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell"; - $newProcess.Arguments = $myInvocation.MyCommand.Definition; - $newProcess.Verb = "runas"; - $proc = [System.Diagnostics.Process]::Start($newProcess); - $proc.WaitForExit() - return $proc.ExitCode + throw "This script must be run with administrative privileges." 
} # First thing to do is to stop the services if they are started From 8433cf868842548d62b2974a8e8c6b2760f79f53 Mon Sep 17 00:00:00 2001 From: "John L. Peterson (Jack)" Date: Tue, 28 Jan 2025 12:19:30 -0500 Subject: [PATCH 26/97] add protoc version and upate install protoc task (#33449) --- .protoc-version | 1 + tasks/install_tasks.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 .protoc-version diff --git a/.protoc-version b/.protoc-version new file mode 100644 index 00000000000000..08a68b8516e886 --- /dev/null +++ b/.protoc-version @@ -0,0 +1 @@ +29.3 diff --git a/tasks/install_tasks.py b/tasks/install_tasks.py index 9384bdb26180e5..a3525d5bea6418 100644 --- a/tasks/install_tasks.py +++ b/tasks/install_tasks.py @@ -104,12 +104,15 @@ def install_shellcheck(ctx, version="0.8.0", destination="/usr/local/bin"): @task -def install_protoc(ctx, version="26.1"): +def install_protoc(ctx, version=None): """ Installs the requested version of protoc in the specified folder (by default /usr/local/bin). Required generate the golang code based on .prod (inv protobuf.generate). 
""" - + if version is None: + version_file = ".protoc-version" + with open(version_file) as f: + version = f.read().strip() if sys.platform == 'win32': print("protoc is not supported on Windows") raise Exit(code=1) From c6e4867a80663d80278af5e7c15ecf3dc768f0a8 Mon Sep 17 00:00:00 2001 From: Jack Phillips Date: Tue, 28 Jan 2025 12:40:00 -0500 Subject: [PATCH 27/97] add unit test for python-scripts (#33045) --- .github/CODEOWNERS | 2 +- .gitlab/source_test/tooling_unit_tests.yml | 1 - omnibus/config/software/datadog-agent.rb | 12 ++- omnibus/python-scripts/packages_tests.py | 70 ++++++++++++++++ omnibus/python-scripts/post_tests.py | 63 ++++++++++++++ omnibus/python-scripts/pre_tests.py | 95 ++++++++++++++++++++++ tasks/invoke_unit_tests.py | 11 +-- tasks/winbuildscripts/Invoke-UnitTests.ps1 | 9 ++ 8 files changed, 253 insertions(+), 10 deletions(-) create mode 100644 omnibus/python-scripts/packages_tests.py create mode 100644 omnibus/python-scripts/post_tests.py create mode 100644 omnibus/python-scripts/pre_tests.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 688f3fe746ea1d..4584d35913a8fa 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -274,7 +274,7 @@ /Makefile.trace @DataDog/agent-apm /omnibus/ @DataDog/agent-delivery -/omnibus/python-scripts/ @DataDog/agent-shared-components +/omnibus/python-scripts/ @DataDog/agent-shared-components @DataDog/windows-agent /omnibus/config/patches/openscap/ @DataDog/agent-cspm /omnibus/config/software/datadog-agent-integrations-*.rb @DataDog/agent-integrations /omnibus/config/software/datadog-security-agent*.rb @Datadog/agent-security @DataDog/agent-delivery diff --git a/.gitlab/source_test/tooling_unit_tests.yml b/.gitlab/source_test/tooling_unit_tests.yml index 547f742b8353e2..80c164d53550b9 100644 --- a/.gitlab/source_test/tooling_unit_tests.yml +++ b/.gitlab/source_test/tooling_unit_tests.yml @@ -10,4 +10,3 @@ invoke_unit_tests: script: - python3 -m pip install -r tasks/libs/requirements-github.txt 
--break-system-packages - inv -e invoke-unit-tests.run - diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb index 9dd41fa755a2bf..2a2ef4c040fe3d 100644 --- a/omnibus/config/software/datadog-agent.rb +++ b/omnibus/config/software/datadog-agent.rb @@ -282,7 +282,13 @@ end end - python_scripts_dir = "#{project_dir}/omnibus/python-scripts" - mkdir "#{install_dir}/python-scripts" - copy "#{python_scripts_dir}/*", "#{install_dir}/python-scripts" + block do + python_scripts_dir = "#{project_dir}/omnibus/python-scripts" + mkdir "#{install_dir}/python-scripts" + Dir.glob("#{python_scripts_dir}/*").each do |file| + unless File.basename(file).end_with?('_tests.py') + copy file, "#{install_dir}/python-scripts" + end + end + end end diff --git a/omnibus/python-scripts/packages_tests.py b/omnibus/python-scripts/packages_tests.py new file mode 100644 index 00000000000000..58717e69536ce3 --- /dev/null +++ b/omnibus/python-scripts/packages_tests.py @@ -0,0 +1,70 @@ +import unittest +from packages import extract_version, create_python_installed_packages_file, create_diff_installed_packages_file, check_file_owner_system_windows +import packaging.requirements +import os +import tempfile + +class TestPackages(unittest.TestCase): + + def test_extract_version(self): + req = packaging.requirements.Requirement("package==1.0.0") + expected_version = "1.0.0" + + result = extract_version(req) + + self.assertEqual(result, expected_version) + + def test_create_python_installed_packages_file(self): + # create temp directory + test_directory = tempfile.mkdtemp() + test_filename = os.path.join(test_directory, "test_installed_packages.txt") + os.makedirs(test_directory, exist_ok=True) + + create_python_installed_packages_file(test_filename) + + self.assertTrue(os.path.exists(test_filename)) + + with open(test_filename, 'r', encoding='utf-8') as f: + content = f.read() + self.assertIn("# DO NOT REMOVE/MODIFY", content) + self.assertIn("invoke", content) 
+ + + # Cleanup + os.remove(test_filename) + + # running rmdir verifies that the directory is empty + os.rmdir(test_directory) + + def test_create_diff_installed_packages_file(self): + test_directory = tempfile.mkdtemp() + old_file = os.path.join(test_directory, "old_installed_packages.txt") + new_file = os.path.join(test_directory, "new_installed_packages.txt") + diff_file = os.path.join(test_directory, ".diff_python_installed_packages.txt") + + with open(old_file, 'w', encoding='utf-8') as f: + f.write("# DO NOT REMOVE/MODIFY\n") + f.write("package==1.0.0\n") + + with open(new_file, 'w', encoding='utf-8') as f: + f.write("# DO NOT REMOVE/MODIFY\n") + f.write("package==1.0.0\n") + f.write("newpackage==2.0.0\n") + + create_diff_installed_packages_file(test_directory, old_file, new_file) + + self.assertTrue(os.path.exists(diff_file)) + + with open(diff_file, 'r', encoding='utf-8') as f: + content = f.read() + self.assertIn("# DO NOT REMOVE/MODIFY", content) + self.assertIn("newpackage==2.0.0", content) + + # Cleanup + os.remove(old_file) + os.remove(new_file) + os.remove(diff_file) + + # running rmdir verifies that the directory is empty + # asserts no extra files are created + os.rmdir(test_directory) diff --git a/omnibus/python-scripts/post_tests.py b/omnibus/python-scripts/post_tests.py new file mode 100644 index 00000000000000..1355d3f16fd6bc --- /dev/null +++ b/omnibus/python-scripts/post_tests.py @@ -0,0 +1,63 @@ +import unittest +import os +import tempfile +from post import post + +class TestPost(unittest.TestCase): + def test_post(self): + install_directory = tempfile.mkdtemp() + storage_location = tempfile.mkdtemp() + + result = post(install_directory, storage_location) + + # assert it ran with no errors + self.assertEqual(result, 0) + + # confirm it made .post_python_installed_packages.txt + post_file = os.path.join(storage_location, ".post_python_installed_packages.txt") + self.assertTrue(os.path.exists(post_file)) + with open(post_file, 'r', 
encoding='utf-8') as f: + content = f.read() + self.assertIn("# DO NOT REMOVE/MODIFY", content) + self.assertIn("invoke", content) + + # Cleanup + os.remove(post_file) + + # running rmdir verifies that the directory is empty + # asserts no extra files are created + os.rmdir(install_directory) + os.rmdir(storage_location) + + def test_post_with_empty_files(self): + install_directory = tempfile.mkdtemp() + storage_location = tempfile.mkdtemp() + post_file = os.path.join(storage_location, '.post_python_installed_packages.txt') + diff_file = os.path.join(storage_location, '.diff_python_installed_packages.txt') + + # Create empty post file + with open(post_file, 'w', encoding='utf-8') as f: + pass + + # Create empty diff file + with open(diff_file, 'w', encoding='utf-8') as f: + pass + + result = post(install_directory, storage_location, skip_flag=False) + + # assert it ran with no errors + self.assertEqual(result, 0) + + # confirm it made .post_python_installed_packages.txt + self.assertTrue(os.path.exists(post_file)) + with open(post_file, 'r', encoding='utf-8') as f: + content = f.read() + self.assertIn("# DO NOT REMOVE/MODIFY", content) + + # Cleanup + os.remove(post_file) + os.remove(diff_file) + + # running rmdir verifies that the directory is empty + os.rmdir(install_directory) + os.rmdir(storage_location) diff --git a/omnibus/python-scripts/pre_tests.py b/omnibus/python-scripts/pre_tests.py new file mode 100644 index 00000000000000..103339ead30e43 --- /dev/null +++ b/omnibus/python-scripts/pre_tests.py @@ -0,0 +1,95 @@ +import unittest +import os +import tempfile +from pre import pre + +class TestPre(unittest.TestCase): + def test_pre(self): + install_directory = tempfile.mkdtemp() + storage_location = tempfile.mkdtemp() + post_file = os.path.join(storage_location, '.post_python_installed_packages.txt') + + with open(post_file, 'w', encoding='utf-8') as f: + f.write("# DO NOT REMOVE/MODIFY\n") + f.write("package==1.0.0\n") + + result = pre(install_directory, 
storage_location) + + # assert it ran with no errors + self.assertEqual(result, 0) + self.assertFalse(os.path.exists(post_file)) + + # assert that the diff file was created + diff_file = os.path.join(storage_location, '.diff_python_installed_packages.txt') + self.assertTrue(os.path.exists(diff_file)) + with open(diff_file, 'r', encoding='utf-8') as f: + content = f.read() + self.assertIn("# DO NOT REMOVE/MODIFY", content) + self.assertIn("invoke", content) + + # Cleanup + os.remove(diff_file) + + # running rmdir verifies that the directory is empty + # asserts no extra files are created + os.rmdir(install_directory) + os.rmdir(storage_location) + + def test_pre_with_empty_files(self): + install_directory = tempfile.mkdtemp() + storage_location = tempfile.mkdtemp() + post_file = os.path.join(storage_location, '.post_python_installed_packages.txt') + diff_file = os.path.join(storage_location, '.diff_python_installed_packages.txt') + + # Create empty post file + with open(post_file, 'w', encoding='utf-8') as f: + pass + + # Create empty diff file + with open(diff_file, 'w', encoding='utf-8') as f: + pass + + result = pre(install_directory, storage_location) + + # assert it ran with no errors + self.assertEqual(result, 0) + self.assertFalse(os.path.exists(post_file)) + + # assert that the diff file was created + self.assertTrue(os.path.exists(diff_file)) + with open(diff_file, 'r', encoding='utf-8') as f: + content = f.read() + self.assertIn("# DO NOT REMOVE/MODIFY", content) + + # Cleanup + os.remove(diff_file) + os.rmdir(install_directory) + os.rmdir(storage_location) + + def test_pre_with_populated_pre_file(self): + install_directory = tempfile.mkdtemp() + storage_location = tempfile.mkdtemp() + pre_file = os.path.join(storage_location, '.pre_python_installed_packages.txt') + post_file = os.path.join(storage_location, '.post_python_installed_packages.txt') + diff_file = os.path.join(storage_location, '.diff_python_installed_packages.txt') + + # Create empty post 
file + with open(post_file, 'w', encoding='utf-8') as f: + pass + + # Create populated pre file + with open(pre_file, 'w', encoding='utf-8') as f: + f.write("# DO NOT REMOVE/MODIFY\n") + f.write("package==1.0.0\n") + + result = pre(install_directory, storage_location) + + # assert it ran with no errors + self.assertEqual(result, 0) + self.assertFalse(os.path.exists(post_file)) + self.assertTrue(os.path.exists(diff_file)) + + # Cleanup + os.remove(diff_file) + os.rmdir(install_directory) + os.rmdir(storage_location) diff --git a/tasks/invoke_unit_tests.py b/tasks/invoke_unit_tests.py index a71a591a438f34..ad0c2bbf41a7de 100644 --- a/tasks/invoke_unit_tests.py +++ b/tasks/invoke_unit_tests.py @@ -18,13 +18,14 @@ @task(default=True) -def run(ctx, tests: str = '', buffer: bool = True, verbosity: int = 1, debug: bool = True): +def run(ctx, tests: str = '', buffer: bool = True, verbosity: int = 1, debug: bool = True, directory: str = '.'): """ Run the unit tests on the invoke tasks - buffer: Buffer stdout / stderr from tests, useful to avoid interleaving output from tests - verbosity: Level of verbosity - debug: If True, will propagate errors to the debugger + - directory: Directory where the tests are located """ tests = [test for test in tests.split(',') if test] @@ -37,7 +38,7 @@ def run(ctx, tests: str = '', buffer: bool = True, verbosity: int = 1, debug: bo print(color_message('Running tests from module', Color.BLUE), color_message(f'{test}_tests', Color.BOLD)) pattern = '*_tests.py' if len(tests) == 0 else test + '_tests.py' - if not run_unit_tests(ctx, pattern, buffer, verbosity, debug): + if not run_unit_tests(ctx, pattern, buffer, verbosity, debug, directory): error = True # Throw error if more than one module fails @@ -48,11 +49,11 @@ def run(ctx, tests: str = '', buffer: bool = True, verbosity: int = 1, debug: bo raise Exit(code=1) else: pattern = '*_tests.py' - if not run_unit_tests(ctx, pattern, buffer, verbosity, debug): + if not run_unit_tests(ctx, 
pattern, buffer, verbosity, debug, directory): raise Exit(code=1) -def run_unit_tests(_, pattern, buffer, verbosity, debug): +def run_unit_tests(_, pattern, buffer, verbosity, debug, directory): import unittest old_environ = os.environ.copy() @@ -64,7 +65,7 @@ def run_unit_tests(_, pattern, buffer, verbosity, debug): os.environ[key] = value loader = unittest.TestLoader() - suite = loader.discover('.', pattern=pattern) + suite = loader.discover(directory, pattern=pattern) if debug and 'TASKS_DEBUG' in os.environ: suite.debug() diff --git a/tasks/winbuildscripts/Invoke-UnitTests.ps1 b/tasks/winbuildscripts/Invoke-UnitTests.ps1 index 3fc68e70e5072e..3f37ad591830bc 100644 --- a/tasks/winbuildscripts/Invoke-UnitTests.ps1 +++ b/tasks/winbuildscripts/Invoke-UnitTests.ps1 @@ -117,6 +117,15 @@ Invoke-BuildScript ` Write-Host -ForegroundColor Red "Agent build failed $err" exit $err } + + # Run python-script unit tests + & inv -e invoke-unit-tests --directory=".\omnibus\python-scripts\" + $err = $LASTEXITCODE + Write-Host Python-script test result is $err + if($err -ne 0){ + Write-Host -ForegroundColor Red "Python-script test failed $err" + exit $err + } # Go unit tests $test_output_file = if ($Env:TEST_OUTPUT_FILE) { $Env:TEST_OUTPUT_FILE } else { "test_output.json" } From 4b22c4c88addf982db3886146b1202dd23c594e3 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 28 Jan 2025 20:00:27 +0100 Subject: [PATCH 28/97] [CSPM] re-add the kubelet build tag, needed for local hostname resolution (#33496) --- tasks/build_tags.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tasks/build_tags.py b/tasks/build_tags.py index 8fcd60cd99fbc2..a053fc575d20c4 100644 --- a/tasks/build_tags.py +++ b/tasks/build_tags.py @@ -153,6 +153,7 @@ "docker", "zlib", "zstd", + "kubelet", "ec2", } From cf5f695d4b77f7c0a6329a6e5de565cca433471c Mon Sep 17 00:00:00 2001 From: Maxime Riaud <65339037+misteriaud@users.noreply.github.com> Date: Tue, 28 Jan 2025 20:51:15 +0100 Subject: [PATCH 29/97] 
[ASCII-2726] Add JMX to FIPS internal deployed image job (#33490) --- .../internal_image_deploy/internal_image_deploy.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml index 7c155413ced58c..4d854b8985ccee 100644 --- a/.gitlab/internal_image_deploy/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml @@ -51,9 +51,9 @@ docker_trigger_internal-fips: stage: internal_image_deploy rules: !reference [.on_deploy_internal_or_manual] needs: - - job: docker_build_fips_agent7 + - job: docker_build_fips_agent7_jmx artifacts: false - - job: docker_build_fips_agent7_arm64 + - job: docker_build_fips_agent7_arm64_jmx artifacts: false image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] @@ -61,9 +61,9 @@ docker_trigger_internal-fips: DYNAMIC_BUILD_RENDER_RULES: agent-build-only # fake rule to not trigger the ones in the images repo IMAGE_VERSION: tmpl-v11 IMAGE_NAME: datadog-agent - RELEASE_TAG: ${CI_COMMIT_REF_SLUG}-fips - BUILD_TAG: ${CI_COMMIT_REF_SLUG}-fips - TMPL_SRC_IMAGE: v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-fips + RELEASE_TAG: ${CI_COMMIT_REF_SLUG}-fips-jmx + BUILD_TAG: ${CI_COMMIT_REF_SLUG}-fips-jmx + TMPL_SRC_IMAGE: v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-fips-jmx TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: From c426b6d2561e134c3fe03b0d8efb03b536be7d41 Mon Sep 17 00:00:00 2001 From: Gabriel Dos Santos <91925154+gabedos@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:16:45 -0500 Subject: [PATCH 30/97] [CONTP-579] Acknowledge existence of dd_entity_id (#33501) --- pkg/config/setup/config.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 3f3c0ed50d4d67..c812ea629abf0a 100644 --- 
a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -546,6 +546,8 @@ func InitConfig(config pkgconfigmodel.Setup) { config.BindEnvAndSetDefault("cluster_agent.kube_metadata_collection.resources", []string{}) config.BindEnvAndSetDefault("cluster_agent.kube_metadata_collection.resource_annotations_exclude", []string{}) config.BindEnvAndSetDefault("cluster_agent.cluster_tagger.grpc_max_message_size", 4<<20) // 4 MB + // the entity id, typically set by dca admisson controller config mutator, used for external origin detection + config.SetKnown("entity_id") // Metadata endpoints From 2f59ee607c50b9e0b1ee5b10b9594fea08b6ebc1 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Wed, 29 Jan 2025 00:06:35 +0100 Subject: [PATCH 31/97] [CWS] fix app package reporting (#33497) --- pkg/util/trivy/trivy.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index 215673b2f2706d..fbde3f2a19de98 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -256,7 +256,7 @@ type driver struct { } func (d *driver) Scan(_ context.Context, target, artifactKey string, blobKeys []string, _ types.ScanOptions) ( - results types.Results, osFound ftypes.OS, err error) { + types.Results, ftypes.OS, error) { detail, err := d.applier.ApplyLayers(artifactKey, blobKeys) switch { @@ -284,20 +284,31 @@ func (d *driver) Scan(_ context.Context, target, artifactKey string, blobKeys [] return nil, ftypes.OS{}, xerrors.Errorf("failed to apply layers: %w", err) } - result := types.Result{ + results := make([]types.Result, 0, 1+len(detail.Applications)) + + // main OS result + osresult := types.Result{ Target: fmt.Sprintf("%s (%s %s)", target, detail.OS.Family, detail.OS.Name), Class: types.ClassOSPkg, Type: detail.OS.Family, } sort.Sort(detail.Packages) - result.Packages = detail.Packages + osresult.Packages = detail.Packages + results = append(results, osresult) + for _, app := range 
detail.Applications { sort.Sort(app.Packages) - result.Packages = append(result.Packages, app.Packages...) + appresult := types.Result{ + Target: app.FilePath, + Class: types.ClassLangPkg, + Type: app.Type, + Packages: app.Packages, + } + results = append(results, appresult) } - return []types.Result{result}, detail.OS, nil + return results, detail.OS, nil } func (c *Collector) scan(ctx context.Context, artifact artifact.Artifact, applier applier.Applier, imgMeta *workloadmeta.ContainerImageMetadata, cache CacheWithCleaner, useCache bool) (*types.Report, error) { From 9043630238e913d7452eeca1becad1f8213412d3 Mon Sep 17 00:00:00 2001 From: Geoffrey Oxberry Date: Tue, 28 Jan 2025 18:33:31 -0800 Subject: [PATCH 32/97] regression_detector.yml: bump smp 0.20.1 -> 0.20.2 (#33511) Signed-off-by: Geoffrey M. Oxberry --- .gitlab/functional_test/regression_detector.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index cd13e211e8d75b..2f437eb391e904 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -22,7 +22,7 @@ single-machine-performance-regression_detector: - outputs/decision_record.md # for posterity, this is appended to final PR comment when: always variables: - SMP_VERSION: 0.20.1 + SMP_VERSION: 0.20.2 # See 'decision_record.md' for the determination of whether this job passes or fails. 
allow_failure: false script: From 67e7c6a641bdc896a53ed5d9bfc5b7f4aa77bf47 Mon Sep 17 00:00:00 2001 From: Caleb Metz <135133572+cmetz100@users.noreply.github.com> Date: Tue, 28 Jan 2025 23:06:26 -0500 Subject: [PATCH 33/97] Pass `$CI_COMMIT_BRANCH` into SMP (#33500) Signed-off-by: Caleb Metz --- .gitlab/functional_test/regression_detector.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index 2f437eb391e904..e41014c56de96d 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -100,7 +100,7 @@ single-machine-performance-regression_detector: - echo "${BASELINE_SHA} | ${BASELINE_IMAGE}" - COMPARISON_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:${CI_COMMIT_SHA}-7-amd64 - echo "${CI_COMMIT_SHA} | ${COMPARISON_IMAGE}" - - SMP_TAGS="ci_pipeline_id=${CI_PIPELINE_ID},ci_job_id=${CI_JOB_ID}" + - SMP_TAGS="ci_pipeline_id=${CI_PIPELINE_ID},ci_job_id=${CI_JOB_ID},ci_commit_branch=${CI_COMMIT_BRANCH}" - echo "Tags passed through SMP are ${SMP_TAGS}" - RUST_LOG="info,aws_config::profile::credentials=error" - RUST_LOG_DEBUG="debug,aws_config::profile::credentials=error" From 0761356db9c24741b9cf2e3de311a382c6a57e58 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Wed, 29 Jan 2025 10:03:25 +0100 Subject: [PATCH 34/97] [CWS] onboard `pkg/eventmonitor/api/` protobuf to CWS proto generator (#33474) --- pkg/eventmonitor/proto/api/api.pb.go | 73 +++------- pkg/eventmonitor/proto/api/api.proto | 2 +- pkg/eventmonitor/proto/api/api_grpc.pb.go | 78 +++++----- pkg/eventmonitor/proto/api/api_vtproto.pb.go | 134 +++--------------- .../mocks/event_monitoring_module_client.go | 10 +- .../mocks/event_monitoring_module_server.go | 6 +- tasks/security_agent.py | 8 +- 7 files changed, 89 insertions(+), 222 deletions(-) diff --git a/pkg/eventmonitor/proto/api/api.pb.go b/pkg/eventmonitor/proto/api/api.pb.go index 
88a9562faede92..646849267840b8 100644 --- a/pkg/eventmonitor/proto/api/api.pb.go +++ b/pkg/eventmonitor/proto/api/api.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.36.3 // protoc // source: pkg/eventmonitor/proto/api/api.proto @@ -21,20 +21,17 @@ const ( ) type GetProcessEventParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeoutSeconds int32 `protobuf:"varint,1,opt,name=TimeoutSeconds,proto3" json:"TimeoutSeconds,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + TimeoutSeconds int32 `protobuf:"varint,1,opt,name=TimeoutSeconds,proto3" json:"TimeoutSeconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetProcessEventParams) Reset() { *x = GetProcessEventParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_eventmonitor_proto_api_api_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_eventmonitor_proto_api_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetProcessEventParams) String() string { @@ -45,7 +42,7 @@ func (*GetProcessEventParams) ProtoMessage() {} func (x *GetProcessEventParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_eventmonitor_proto_api_api_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -68,20 +65,17 @@ func (x *GetProcessEventParams) GetTimeoutSeconds() int32 { } type ProcessEventMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` unknownFields protoimpl.UnknownFields - - Data 
[]byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProcessEventMessage) Reset() { *x = ProcessEventMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_eventmonitor_proto_api_api_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_eventmonitor_proto_api_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ProcessEventMessage) String() string { @@ -92,7 +86,7 @@ func (*ProcessEventMessage) ProtoMessage() {} func (x *ProcessEventMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_eventmonitor_proto_api_api_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -132,9 +126,10 @@ var file_pkg_eventmonitor_proto_api_api_proto_rawDesc = []byte{ 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x18, - 0x5a, 0x16, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x1c, + 0x5a, 0x1a, 0x70, 0x6b, 0x67, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -150,7 +145,7 @@ func file_pkg_eventmonitor_proto_api_api_proto_rawDescGZIP() []byte { } 
var file_pkg_eventmonitor_proto_api_api_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_pkg_eventmonitor_proto_api_api_proto_goTypes = []interface{}{ +var file_pkg_eventmonitor_proto_api_api_proto_goTypes = []any{ (*GetProcessEventParams)(nil), // 0: api.GetProcessEventParams (*ProcessEventMessage)(nil), // 1: api.ProcessEventMessage } @@ -169,32 +164,6 @@ func file_pkg_eventmonitor_proto_api_api_proto_init() { if File_pkg_eventmonitor_proto_api_api_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_pkg_eventmonitor_proto_api_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProcessEventParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_eventmonitor_proto_api_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcessEventMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/pkg/eventmonitor/proto/api/api.proto b/pkg/eventmonitor/proto/api/api.proto index 02a981937fa692..4d12061dd75d10 100644 --- a/pkg/eventmonitor/proto/api/api.proto +++ b/pkg/eventmonitor/proto/api/api.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -option go_package = "pkg/security/proto/api"; +option go_package = "pkg/eventmonitor/proto/api"; package api; diff --git a/pkg/eventmonitor/proto/api/api_grpc.pb.go b/pkg/eventmonitor/proto/api/api_grpc.pb.go index 7e4ee2c91cc4c1..5433a3d16784e0 100644 --- a/pkg/eventmonitor/proto/api/api_grpc.pb.go +++ b/pkg/eventmonitor/proto/api/api_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.12.4 +// - protoc-gen-go-grpc v1.5.1 +// - protoc // source: pkg/eventmonitor/proto/api/api.proto package api @@ -15,14 +15,18 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + EventMonitoringModule_GetProcessEvents_FullMethodName = "/api.EventMonitoringModule/GetProcessEvents" +) // EventMonitoringModuleClient is the client API for EventMonitoringModule service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type EventMonitoringModuleClient interface { - GetProcessEvents(ctx context.Context, in *GetProcessEventParams, opts ...grpc.CallOption) (EventMonitoringModule_GetProcessEventsClient, error) + GetProcessEvents(ctx context.Context, in *GetProcessEventParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ProcessEventMessage], error) } type eventMonitoringModuleClient struct { @@ -33,12 +37,13 @@ func NewEventMonitoringModuleClient(cc grpc.ClientConnInterface) EventMonitoring return &eventMonitoringModuleClient{cc} } -func (c *eventMonitoringModuleClient) GetProcessEvents(ctx context.Context, in *GetProcessEventParams, opts ...grpc.CallOption) (EventMonitoringModule_GetProcessEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &EventMonitoringModule_ServiceDesc.Streams[0], "/api.EventMonitoringModule/GetProcessEvents", opts...) +func (c *eventMonitoringModuleClient) GetProcessEvents(ctx context.Context, in *GetProcessEventParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ProcessEventMessage], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ stream, err := c.cc.NewStream(ctx, &EventMonitoringModule_ServiceDesc.Streams[0], EventMonitoringModule_GetProcessEvents_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &eventMonitoringModuleGetProcessEventsClient{stream} + x := &grpc.GenericClientStream[GetProcessEventParams, ProcessEventMessage]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -48,39 +53,29 @@ func (c *eventMonitoringModuleClient) GetProcessEvents(ctx context.Context, in * return x, nil } -type EventMonitoringModule_GetProcessEventsClient interface { - Recv() (*ProcessEventMessage, error) - grpc.ClientStream -} - -type eventMonitoringModuleGetProcessEventsClient struct { - grpc.ClientStream -} - -func (x *eventMonitoringModuleGetProcessEventsClient) Recv() (*ProcessEventMessage, error) { - m := new(ProcessEventMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type EventMonitoringModule_GetProcessEventsClient = grpc.ServerStreamingClient[ProcessEventMessage] // EventMonitoringModuleServer is the server API for EventMonitoringModule service. // All implementations must embed UnimplementedEventMonitoringModuleServer -// for forward compatibility +// for forward compatibility. type EventMonitoringModuleServer interface { - GetProcessEvents(*GetProcessEventParams, EventMonitoringModule_GetProcessEventsServer) error + GetProcessEvents(*GetProcessEventParams, grpc.ServerStreamingServer[ProcessEventMessage]) error mustEmbedUnimplementedEventMonitoringModuleServer() } -// UnimplementedEventMonitoringModuleServer must be embedded to have forward compatible implementations. -type UnimplementedEventMonitoringModuleServer struct { -} +// UnimplementedEventMonitoringModuleServer must be embedded to have +// forward compatible implementations. 
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedEventMonitoringModuleServer struct{} -func (UnimplementedEventMonitoringModuleServer) GetProcessEvents(*GetProcessEventParams, EventMonitoringModule_GetProcessEventsServer) error { +func (UnimplementedEventMonitoringModuleServer) GetProcessEvents(*GetProcessEventParams, grpc.ServerStreamingServer[ProcessEventMessage]) error { return status.Errorf(codes.Unimplemented, "method GetProcessEvents not implemented") } func (UnimplementedEventMonitoringModuleServer) mustEmbedUnimplementedEventMonitoringModuleServer() {} +func (UnimplementedEventMonitoringModuleServer) testEmbeddedByValue() {} // UnsafeEventMonitoringModuleServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to EventMonitoringModuleServer will @@ -90,6 +85,13 @@ type UnsafeEventMonitoringModuleServer interface { } func RegisterEventMonitoringModuleServer(s grpc.ServiceRegistrar, srv EventMonitoringModuleServer) { + // If the following call pancis, it indicates UnimplementedEventMonitoringModuleServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&EventMonitoringModule_ServiceDesc, srv) } @@ -98,21 +100,11 @@ func _EventMonitoringModule_GetProcessEvents_Handler(srv interface{}, stream grp if err := stream.RecvMsg(m); err != nil { return err } - return srv.(EventMonitoringModuleServer).GetProcessEvents(m, &eventMonitoringModuleGetProcessEventsServer{stream}) -} - -type EventMonitoringModule_GetProcessEventsServer interface { - Send(*ProcessEventMessage) error - grpc.ServerStream -} - -type eventMonitoringModuleGetProcessEventsServer struct { - grpc.ServerStream + return srv.(EventMonitoringModuleServer).GetProcessEvents(m, &grpc.GenericServerStream[GetProcessEventParams, ProcessEventMessage]{ServerStream: stream}) } -func (x *eventMonitoringModuleGetProcessEventsServer) Send(m *ProcessEventMessage) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type EventMonitoringModule_GetProcessEventsServer = grpc.ServerStreamingServer[ProcessEventMessage] // EventMonitoringModule_ServiceDesc is the grpc.ServiceDesc for EventMonitoringModule service. // It's only intended for direct use with grpc.RegisterService, diff --git a/pkg/eventmonitor/proto/api/api_vtproto.pb.go b/pkg/eventmonitor/proto/api/api_vtproto.pb.go index 6dbcd0ce0a0d02..359cf1e22ff269 100644 --- a/pkg/eventmonitor/proto/api/api_vtproto.pb.go +++ b/pkg/eventmonitor/proto/api/api_vtproto.pb.go @@ -1,14 +1,14 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.6.0 // source: pkg/eventmonitor/proto/api/api.proto package api import ( fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" - bits "math/bits" ) const ( @@ -49,7 +49,7 @@ func (m *GetProcessEventParams) MarshalToSizedBufferVT(dAtA []byte) (int, error) copy(dAtA[i:], m.unknownFields) } if m.TimeoutSeconds != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeoutSeconds)) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TimeoutSeconds)) i-- dAtA[i] = 0x8 } @@ -89,24 +89,13 @@ func (m *ProcessEventMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if len(m.Data) > 0 { i -= len(m.Data) copy(dAtA[i:], m.Data) - i = encodeVarint(dAtA, i, uint64(len(m.Data))) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Data))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} func (m *GetProcessEventParams) SizeVT() (n int) { if m == nil { return 0 @@ -114,7 +103,7 @@ func (m *GetProcessEventParams) SizeVT() (n int) { var l int _ = l if m.TimeoutSeconds != 0 { - n += 1 + sov(uint64(m.TimeoutSeconds)) + n += 1 + protohelpers.SizeOfVarint(uint64(m.TimeoutSeconds)) } n += len(m.unknownFields) return n @@ -128,18 +117,12 @@ func (m *ProcessEventMessage) SizeVT() (n int) { _ = l l = len(m.Data) if l > 0 { - n += 1 + l + sov(uint64(l)) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } n += len(m.unknownFields) return n } -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} func (m *GetProcessEventParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -148,7 +131,7 @@ func (m 
*GetProcessEventParams) UnmarshalVT(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflow + return protohelpers.ErrIntOverflow } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -176,7 +159,7 @@ func (m *GetProcessEventParams) UnmarshalVT(dAtA []byte) error { m.TimeoutSeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflow + return protohelpers.ErrIntOverflow } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -190,12 +173,12 @@ func (m *GetProcessEventParams) UnmarshalVT(dAtA []byte) error { } default: iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + return protohelpers.ErrInvalidLength } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -218,7 +201,7 @@ func (m *ProcessEventMessage) UnmarshalVT(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflow + return protohelpers.ErrIntOverflow } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -246,7 +229,7 @@ func (m *ProcessEventMessage) UnmarshalVT(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflow + return protohelpers.ErrIntOverflow } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -259,11 +242,11 @@ func (m *ProcessEventMessage) UnmarshalVT(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLength + return protohelpers.ErrInvalidLength } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLength + return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF @@ -275,12 +258,12 @@ func (m *ProcessEventMessage) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) 
|| (iNdEx+skippy) < 0 { - return ErrInvalidLength + return protohelpers.ErrInvalidLength } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -295,88 +278,3 @@ func (m *ProcessEventMessage) UnmarshalVT(dAtA []byte) error { } return nil } - -func skip(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLength - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLength - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflow = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_client.go b/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_client.go index 1d59cb8285367f..d37e7191f0c3a0 100644 --- 
a/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_client.go +++ b/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_client.go @@ -18,7 +18,7 @@ type EventMonitoringModuleClient struct { } // GetProcessEvents provides a mock function with given fields: ctx, in, opts -func (_m *EventMonitoringModuleClient) GetProcessEvents(ctx context.Context, in *api.GetProcessEventParams, opts ...grpc.CallOption) (api.EventMonitoringModule_GetProcessEventsClient, error) { +func (_m *EventMonitoringModuleClient) GetProcessEvents(ctx context.Context, in *api.GetProcessEventParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[api.ProcessEventMessage], error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -32,16 +32,16 @@ func (_m *EventMonitoringModuleClient) GetProcessEvents(ctx context.Context, in panic("no return value specified for GetProcessEvents") } - var r0 api.EventMonitoringModule_GetProcessEventsClient + var r0 grpc.ServerStreamingClient[api.ProcessEventMessage] var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *api.GetProcessEventParams, ...grpc.CallOption) (api.EventMonitoringModule_GetProcessEventsClient, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *api.GetProcessEventParams, ...grpc.CallOption) (grpc.ServerStreamingClient[api.ProcessEventMessage], error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *api.GetProcessEventParams, ...grpc.CallOption) api.EventMonitoringModule_GetProcessEventsClient); ok { + if rf, ok := ret.Get(0).(func(context.Context, *api.GetProcessEventParams, ...grpc.CallOption) grpc.ServerStreamingClient[api.ProcessEventMessage]); ok { r0 = rf(ctx, in, opts...) 
} else { if ret.Get(0) != nil { - r0 = ret.Get(0).(api.EventMonitoringModule_GetProcessEventsClient) + r0 = ret.Get(0).(grpc.ServerStreamingClient[api.ProcessEventMessage]) } } diff --git a/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_server.go b/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_server.go index 8fa1071c5e624e..3cd7aa659ebd5d 100644 --- a/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_server.go +++ b/pkg/eventmonitor/proto/api/mocks/event_monitoring_module_server.go @@ -4,6 +4,8 @@ package mocks import ( api "github.com/DataDog/datadog-agent/pkg/eventmonitor/proto/api" + grpc "google.golang.org/grpc" + mock "github.com/stretchr/testify/mock" ) @@ -13,7 +15,7 @@ type EventMonitoringModuleServer struct { } // GetProcessEvents provides a mock function with given fields: _a0, _a1 -func (_m *EventMonitoringModuleServer) GetProcessEvents(_a0 *api.GetProcessEventParams, _a1 api.EventMonitoringModule_GetProcessEventsServer) error { +func (_m *EventMonitoringModuleServer) GetProcessEvents(_a0 *api.GetProcessEventParams, _a1 grpc.ServerStreamingServer[api.ProcessEventMessage]) error { ret := _m.Called(_a0, _a1) if len(ret) == 0 { @@ -21,7 +23,7 @@ func (_m *EventMonitoringModuleServer) GetProcessEvents(_a0 *api.GetProcessEvent } var r0 error - if rf, ok := ret.Get(0).(func(*api.GetProcessEventParams, api.EventMonitoringModule_GetProcessEventsServer) error); ok { + if rf, ok := ret.Get(0).(func(*api.GetProcessEventParams, grpc.ServerStreamingServer[api.ProcessEventMessage]) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) diff --git a/tasks/security_agent.py b/tasks/security_agent.py index 40b7f9aed9c13f..360526d13d86d5 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -9,6 +9,7 @@ import shutil import sys import tempfile +from itertools import chain from subprocess import check_output from invoke.exceptions import Exit @@ -662,8 +663,13 @@ def generate_cws_proto(ctx): ctx.run( f"protoc -I. 
{plugin_opts} --go_out=paths=source_relative:. --go-vtproto_out=. --go-vtproto_opt=features=marshal+unmarshal+size --go-grpc_out=paths=source_relative:. pkg/security/proto/api/api.proto" ) + ctx.run( + f"protoc -I. {plugin_opts} --go_out=paths=source_relative:. --go-vtproto_out=. --go-vtproto_opt=features=marshal+unmarshal+size --go-grpc_out=paths=source_relative:. pkg/eventmonitor/proto/api/api.proto" + ) - for path in glob.glob("pkg/security/**/*.pb.go", recursive=True): + security_files = glob.glob("pkg/security/**/*.pb.go", recursive=True) + eventmonitor_files = glob.glob("pkg/eventmonitor/**/*.pb.go", recursive=True) + for path in chain(security_files, eventmonitor_files): print(f"replacing protoc version in {path}") with open(path) as f: content = f.read() From 435af28ad03a67c25010cd31099dcad80947815d Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Wed, 29 Jan 2025 09:49:36 +0000 Subject: [PATCH 35/97] Fix the build-rc task (#33513) --- tasks/release.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/release.py b/tasks/release.py index 4f1bfe8e13a907..d0ee30f61eca9b 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -583,7 +583,7 @@ def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False, st print(color_message("Creating RC pipeline", "bold")) # Step 2: Run the RC pipeline - run_rc_pipeline(release_branch, gitlab_tag.name, k8s_deployments) + run_rc_pipeline(ctx, release_branch, gitlab_tag.name, k8s_deployments) def get_qualification_rc_tag(ctx, release_branch): From e979f0b4e7a1eaba0f617bc5fb3a1f54b589e943 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Wed, 29 Jan 2025 11:12:17 +0100 Subject: [PATCH 36/97] fix(renovate): enable only the custom manager (#33506) --- renovate.json | 1 + 1 file changed, 1 insertion(+) diff --git a/renovate.json b/renovate.json index f7bbfee23161e6..bd95cb1f50961d 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,6 @@ { "$schema": 
"https://docs.renovatebot.com/renovate-schema.json", + "enabledManagers": ["custom.regex"], "customManagers" : [ { "customType": "regex", From 0c97b5e22f6ec6b7d967b21e7d02a96f52c9038e Mon Sep 17 00:00:00 2001 From: Carlos Date: Wed, 29 Jan 2025 10:15:14 +0000 Subject: [PATCH 37/97] Updated CODEOWNERS with new teams for AML/APR team split (#33236) --- .ddqa/config.toml | 16 +++ .github/CODEOWNERS | 107 +++++++++--------- .gitlab/JOBOWNERS | 2 +- comp/README.md | 24 ++-- comp/agent/jmxlogger/component.go | 2 +- comp/aggregator/bundle.go | 2 +- comp/aggregator/demultiplexer/component.go | 2 +- .../demultiplexerendpoint/def/component.go | 2 +- comp/checks/bundle.go | 2 +- comp/collector/bundle.go | 2 +- comp/collector/collector/component.go | 2 +- comp/dogstatsd/bundle.go | 2 +- comp/dogstatsd/pidmap/component.go | 2 +- comp/dogstatsd/replay/def/component.go | 2 +- comp/dogstatsd/replay/fx/fx.go | 2 +- comp/dogstatsd/server/component.go | 2 +- comp/dogstatsd/server/serverless.go | 2 +- comp/dogstatsd/serverDebug/component.go | 2 +- comp/dogstatsd/statsd/component.go | 2 +- comp/dogstatsd/status/component.go | 2 +- comp/forwarder/bundle.go | 2 +- comp/forwarder/defaultforwarder/component.go | 2 +- comp/forwarder/eventplatform/component.go | 2 +- .../eventplatformreceiver/component.go | 2 +- comp/forwarder/orchestrator/component.go | 2 +- .../orchestratorinterface/component.go | 2 +- comp/logs/adscheduler/component.go | 2 +- comp/logs/agent/component.go | 2 +- comp/logs/bundle.go | 2 +- comp/logs/integrations/def/component.go | 2 +- .../logscompression/def/component.go | 2 +- .../metricscompression/def/component.go | 2 +- tasks/libs/issue/model/constants.py | 2 + tasks/libs/pipeline/github_jira_map.yaml | 2 + tasks/libs/pipeline/github_slack_map.yaml | 2 + .../pipeline/github_slack_review_map.yaml | 2 + 36 files changed, 123 insertions(+), 90 deletions(-) diff --git a/.ddqa/config.toml b/.ddqa/config.toml index 897db37c313792..17e3811c30163b 100644 --- 
a/.ddqa/config.toml +++ b/.ddqa/config.toml @@ -10,6 +10,22 @@ github_team = "agent-metrics-logs" github_labels = ["team/agent-metrics-logs"] exclude_members = ["olivielpeau"] +[teams."Agent Logs"] +jira_project = "AGNTLOG" +jira_issue_type = "Task" +jira_statuses = ["To Do", "In Progress", "Done"] +github_team = "agent-logs" +github_labels = ["team/agent-logs"] +exclude_members = [""] + +[teams."Agent Metrics"] +jira_project = "AGTMETRICS" +jira_issue_type = "Task" +jira_statuses = ["To Do", "In Progress", "Done"] +github_team = "agent-metrics" +github_labels = ["team/agent-metrics"] +exclude_members = [""] + [teams."Agent Processing and Routing"] jira_project = "APR" jira_issue_type = "Task" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4584d35913a8fa..71dffe21a3b55e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -36,7 +36,7 @@ /Makefile.trace @DataDog/agent-delivery /mkdocs.yml @DataDog/agent-devx-infra -/release.json @DataDog/agent-delivery @DataDog/agent-metrics-logs @DataDog/windows-kernel-integrations @DataDog/agent-security +/release.json @DataDog/agent-delivery @DataDog/agent-metrics @DataDog/windows-kernel-integrations @DataDog/agent-security /renovate.json @DataDog/agent-devx-infra /requirements.txt @DataDog/agent-devx-infra /pyproject.toml @DataDog/agent-devx-infra @DataDog/agent-devx-loops @@ -95,7 +95,7 @@ /.gitlab/binary_build/linux.yml @DataDog/agent-devx-infra @DataDog/agent-delivery /.gitlab/functional_test/include.yml @DataDog/agent-devx-infra /.gitlab/install_script_testing/install_script_testing.yml @DataDog/agent-delivery @DataDog/container-ecosystems -/.gitlab/integration_test/dogstatsd.yml @DataDog/agent-devx-infra @DataDog/agent-metrics-logs +/.gitlab/integration_test/dogstatsd.yml @DataDog/agent-devx-infra @DataDog/agent-metrics /.gitlab/integration_test/include.yml @DataDog/agent-devx-infra /.gitlab/integration_test/linux.yml @DataDog/agent-devx-infra /.gitlab/integration_test/otel.yml @DataDog/agent-devx-infra 
@DataDog/opentelemetry @@ -188,12 +188,12 @@ /cmd/ @DataDog/agent-shared-components /cmd/trace-agent/ @DataDog/agent-apm /cmd/agent/subcommands/controlsvc @DataDog/windows-agent -/cmd/agent/subcommands/dogstatsd* @DataDog/agent-metrics-logs +/cmd/agent/subcommands/dogstatsd* @DataDog/agent-metrics /cmd/agent/subcommands/integrations @DataDog/agent-integrations @DataDog/agent-shared-components /cmd/agent/subcommands/remoteconfig @Datadog/remote-config /cmd/agent/subcommands/snmp @DataDog/ndm-core -/cmd/agent/subcommands/streamlogs @DataDog/agent-metrics-logs -/cmd/agent/subcommands/analyzelogs @DataDog/agent-metrics-logs +/cmd/agent/subcommands/streamlogs @DataDog/agent-logs +/cmd/agent/subcommands/analyzelogs @DataDog/agent-logs /cmd/agent/subcommands/streamep @DataDog/container-integrations /cmd/agent/subcommands/taggerlist @DataDog/container-platform /cmd/agent/subcommands/workloadlist @DataDog/container-platform @@ -216,7 +216,7 @@ /cmd/cluster-agent-cloudfoundry/ @DataDog/agent-integrations /cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @DataDog/agent-integrations /cmd/cws-instrumentation/ @DataDog/agent-security -/cmd/dogstatsd/ @DataDog/agent-metrics-logs +/cmd/dogstatsd/ @DataDog/agent-metrics /cmd/otel-agent/ @DataDog/opentelemetry /cmd/process-agent/ @DataDog/container-intake /cmd/serverless/ @DataDog/serverless @Datadog/serverless-aws @@ -251,15 +251,15 @@ /Dockerfiles/agent/entrypoint.ps1 @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent/windows/ @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent-ot @DataDog/opentelemetry -/Dockerfiles/agent/bouncycastle-fips @DataDog/agent-metrics-logs +/Dockerfiles/agent/bouncycastle-fips @DataDog/agent-metrics /docs/ @DataDog/agent-devx-loops -/docs/dev/checks/ @DataDog/agent-metrics-logs +/docs/dev/checks/ @DataDog/agent-metrics /docs/cloud-workload-security/ @DataDog/documentation @DataDog/agent-security /docs/public/components/ @DataDog/agent-shared-components 
/docs/public/hostname/ @DataDog/agent-shared-components -/docs/public/architecture/dogstatsd/ @DataDog/agent-metrics-logs +/docs/public/architecture/dogstatsd/ @DataDog/agent-metrics /docs/public/guidelines/deprecated-components-documentation/ @DataDog/agent-shared-components /google-marketplace/ @DataDog/container-ecosystems @@ -279,7 +279,7 @@ /omnibus/config/software/datadog-agent-integrations-*.rb @DataDog/agent-integrations /omnibus/config/software/datadog-security-agent*.rb @Datadog/agent-security @DataDog/agent-delivery /omnibus/config/software/openscap.rb @DataDog/agent-cspm -/omnibus/config/software/sds.rb @DataDog/agent-processing-and-routing +/omnibus/config/software/sds.rb @DataDog/agent-logs /omnibus/config/software/snmp-traps.rb @DataDog/ndm-core /omnibus/resources/*/msi/ @DataDog/windows-agent @@ -287,14 +287,14 @@ # BEGIN COMPONENTS /comp @DataDog/agent-shared-components /comp/agent @DataDog/agent-shared-components -/comp/aggregator @DataDog/agent-metrics-logs +/comp/aggregator @DataDog/agent-metrics /comp/api @DataDog/agent-shared-components -/comp/checks @DataDog/agent-metrics-logs -/comp/collector @DataDog/agent-metrics-logs +/comp/checks @DataDog/agent-metrics +/comp/collector @DataDog/agent-metrics /comp/core @DataDog/agent-shared-components -/comp/dogstatsd @DataDog/agent-metrics-logs -/comp/forwarder @DataDog/agent-processing-and-routing -/comp/logs @DataDog/agent-metrics-logs +/comp/dogstatsd @DataDog/agent-metrics +/comp/forwarder @DataDog/agent-metrics +/comp/logs @DataDog/agent-logs /comp/metadata @DataDog/agent-shared-components /comp/ndmtmp @DataDog/ndm-core /comp/netflow @DataDog/ndm-integrations @@ -307,7 +307,7 @@ /comp/trace @DataDog/agent-apm /comp/updater @DataDog/fleet @DataDog/windows-agent /comp/agent/cloudfoundrycontainer @DataDog/agent-integrations -/comp/agent/jmxlogger @DataDog/agent-metrics-logs +/comp/agent/jmxlogger @DataDog/agent-metrics /comp/aggregator/diagnosesendermanager @DataDog/agent-shared-components 
/comp/checks/agentcrashdetect @DataDog/windows-kernel-integrations /comp/checks/windowseventlog @DataDog/windows-agent @@ -316,6 +316,9 @@ /comp/core/sysprobeconfig @DataDog/ebpf-platform /comp/core/tagger @DataDog/container-platform /comp/core/workloadmeta @DataDog/container-platform +/comp/forwarder/eventplatform @DataDog/agent-logs +/comp/forwarder/eventplatformreceiver @DataDog/agent-logs +/comp/forwarder/orchestrator @DataDog/agent-logs /comp/metadata/packagesigning @DataDog/agent-delivery /comp/trace/etwtracer @DataDog/windows-agent /comp/autoscaling/datadogclient @DataDog/container-integrations @@ -323,8 +326,8 @@ /comp/haagent @DataDog/ndm-core /comp/languagedetection/client @DataDog/container-platform /comp/rdnsquerier @DataDog/ndm-integrations -/comp/serializer/logscompression @DataDog/agent-processing-and-routing -/comp/serializer/metricscompression @DataDog/agent-processing-and-routing +/comp/serializer/logscompression @DataDog/agent-logs +/comp/serializer/metricscompression @DataDog/agent-metrics /comp/snmpscan @DataDog/ndm-core # END COMPONENTS @@ -337,9 +340,9 @@ # pkg /pkg/ @DataDog/agent-shared-components /pkg/api/ @DataDog/agent-shared-components -/pkg/aggregator/ @DataDog/agent-metrics-logs -/pkg/collector/ @DataDog/agent-metrics-logs -/pkg/commonchecks/ @DataDog/agent-metrics-logs +/pkg/aggregator/ @DataDog/agent-metrics +/pkg/collector/ @DataDog/agent-metrics +/pkg/commonchecks/ @DataDog/agent-metrics /pkg/cli/ @DataDog/agent-shared-components /pkg/cli/subcommands/clusterchecks @DataDog/container-platform /pkg/discovery/ @DataDog/universal-service-monitoring @@ -347,11 +350,11 @@ /pkg/fips @DataDog/agent-shared-components /pkg/gohai @DataDog/agent-shared-components /pkg/gpu/ @DataDog/ebpf-platform -/pkg/jmxfetch/ @DataDog/agent-metrics-logs -/pkg/metrics/ @DataDog/agent-metrics-logs -/pkg/metrics/metricsource.go @DataDog/agent-metrics-logs @DataDog/agent-integrations -/pkg/serializer/ @DataDog/agent-processing-and-routing 
-/pkg/serializer/internal/metrics/origin_mapping.go @DataDog/agent-processing-and-routing @DataDog/agent-integrations +/pkg/jmxfetch/ @DataDog/agent-metrics +/pkg/metrics/ @DataDog/agent-metrics +/pkg/metrics/metricsource.go @DataDog/agent-metrics @DataDog/agent-integrations +/pkg/serializer/ @DataDog/agent-metrics +/pkg/serializer/internal/metrics/origin_mapping.go @DataDog/agent-metrics @DataDog/agent-integrations /pkg/serverless/ @DataDog/serverless @Datadog/serverless-aws /pkg/serverless/appsec/ @DataDog/asm-go /pkg/status/ @DataDog/agent-shared-components @@ -370,8 +373,8 @@ /comp/core/autodiscovery/listeners/cloudfoundry*.go @DataDog/agent-integrations /comp/core/autodiscovery/listeners/snmp*.go @DataDog/ndm-core /comp/core/autodiscovery/providers/ @DataDog/container-platform -/comp/core/autodiscovery/providers/file*.go @DataDog/agent-metrics-logs -/comp/core/autodiscovery/providers/config_reader*.go @DataDog/container-platform @DataDog/agent-metrics-logs +/comp/core/autodiscovery/providers/file*.go @DataDog/agent-logs +/comp/core/autodiscovery/providers/config_reader*.go @DataDog/container-platform @DataDog/agent-logs /comp/core/autodiscovery/providers/cloudfoundry*.go @DataDog/agent-integrations /comp/core/autodiscovery/providers/remote_config*.go @DataDog/remote-config /pkg/cloudfoundry @Datadog/agent-integrations @@ -382,7 +385,7 @@ /pkg/clusteragent/admission/mutate/cwsinstrumentation @Datadog/agent-security /pkg/clusteragent/orchestrator/ @DataDog/container-app /pkg/clusteragent/telemetry/ @DataDog/apm-trace-storage -/pkg/collector/ @DataDog/agent-metrics-logs +/pkg/collector/ @DataDog/agent-metrics /pkg/collector/corechecks/cluster/ @DataDog/container-integrations /pkg/collector/corechecks/cluster/orchestrator @DataDog/container-app /pkg/collector/corechecks/containers/ @DataDog/container-integrations @@ -427,7 +430,7 @@ /pkg/containerlifecycle/ @Datadog/container-integrations /pkg/diagnose/ @Datadog/container-platform /pkg/diagnose/connectivity/ 
@DataDog/agent-shared-components -/pkg/diagnose/ports/ @DataDog/agent-shared-components +/pkg/diagnose/ports/ @DataDog/agent-shared-components /pkg/diagnose/ports/*windows*.go @DataDog/windows-agent /pkg/eventmonitor/ @DataDog/ebpf-platform @DataDog/agent-security /pkg/dynamicinstrumentation/ @DataDog/debugger @@ -439,7 +442,7 @@ /pkg/fleet/ @DataDog/fleet @DataDog/windows-agent /pkg/fleet/installer/setup/djm/ @DataDog/fleet @DataDog/data-jobs-monitoring /pkg/pidfile/ @DataDog/agent-shared-components -/pkg/persistentcache/ @DataDog/agent-metrics-logs +/pkg/persistentcache/ @DataDog/agent-metrics /pkg/proto/ @DataDog/agent-shared-components /pkg/proto/datadog/languagedetection @DataDog/container-intake /pkg/proto/datadog/process @DataDog/container-intake @@ -485,17 +488,17 @@ /pkg/languagedetection @DataDog/container-intake @DataDog/universal-service-monitoring /pkg/linters/ @DataDog/agent-devx-loops /pkg/linters/components/ @DataDog/agent-shared-components -/pkg/logs/ @DataDog/agent-metrics-logs -/pkg/logs/launchers/windowsevent @DataDog/agent-metrics-logs @DataDog/windows-agent -/pkg/logs/tailers/windowsevent @DataDog/agent-metrics-logs @DataDog/windows-agent -/pkg/logs/util/windowsevent @DataDog/agent-metrics-logs @DataDog/windows-agent -/pkg/logs/client @DataDog/agent-processing-and-routing -/pkg/logs/diagnostic @DataDog/agent-processing-and-routing -/pkg/logs/message @DataDog/agent-processing-and-routing -/pkg/logs/pipeline @DataDog/agent-processing-and-routing -/pkg/logs/processor @DataDog/agent-processing-and-routing -/pkg/logs/sds @DataDog/agent-processing-and-routing -/pkg/logs/sender @DataDog/agent-processing-and-routing +/pkg/logs/ @DataDog/agent-logs +/pkg/logs/launchers/windowsevent @DataDog/agent-logs @DataDog/windows-agent +/pkg/logs/tailers/windowsevent @DataDog/agent-logs @DataDog/windows-agent +/pkg/logs/util/windowsevent @DataDog/agent-logs @DataDog/windows-agent +/pkg/logs/client @DataDog/agent-logs +/pkg/logs/diagnostic @DataDog/agent-logs 
+/pkg/logs/message @DataDog/agent-logs +/pkg/logs/pipeline @DataDog/agent-logs +/pkg/logs/processor @DataDog/agent-logs +/pkg/logs/sds @DataDog/agent-logs +/pkg/logs/sender @DataDog/agent-logs /pkg/process/ @DataDog/container-intake /pkg/process/util/address*.go @DataDog/Networks /pkg/process/checks/net*.go @DataDog/Networks @@ -558,13 +561,13 @@ /releasenotes/ @DataDog/documentation /releasenotes-dca/ @DataDog/documentation -/rtloader/ @DataDog/agent-metrics-logs +/rtloader/ @DataDog/agent-metrics /tasks/ @DataDog/agent-devx-loops @DataDog/agent-devx-infra /tasks/msi.py @DataDog/windows-agent /tasks/agent.py @DataDog/agent-shared-components /tasks/go_deps.py @DataDog/agent-shared-components -/tasks/dogstatsd.py @DataDog/agent-metrics-logs +/tasks/dogstatsd.py @DataDog/agent-metrics /tasks/update_go.py @DataDog/agent-shared-components /tasks/unit_tests/update_go_tests.py @DataDog/agent-shared-components /tasks/cluster_agent_cloudfoundry.py @DataDog/agent-integrations @@ -576,9 +579,9 @@ /tasks/kernel_matrix_testing/ @DataDog/ebpf-platform /tasks/ebpf_verifier/ @DataDog/ebpf-platform /tasks/trace_agent.py @DataDog/agent-apm -/tasks/rtloader.py @DataDog/agent-metrics-logs +/tasks/rtloader.py @DataDog/agent-metrics /tasks/security_agent.py @DataDog/agent-security -/tasks/sds.py @DataDog/agent-processing-and-routing +/tasks/sds.py @DataDog/agent-logs /tasks/systray.py @DataDog/windows-agent /tasks/winbuildscripts/ @DataDog/windows-agent /tasks/winbuild.py @DataDog/windows-agent @@ -595,7 +598,7 @@ /tasks/unit_tests/testdata/components_src/ @DataDog/agent-shared-components /tasks/installer.py @DataDog/fleet /test/ @DataDog/agent-devx-loops -/test/benchmarks/ @DataDog/agent-metrics-logs +/test/benchmarks/ @DataDog/agent-metrics /test/benchmarks/kubernetes_state/ @DataDog/container-integrations /test/integration/ @DataDog/container-integrations /test/integration/docker/otel_agent_build_tests.py @DataDog/opentelemetry @@ -629,7 +632,7 @@ 
/test/new-e2e/tests/sysprobe-functional @DataDog/windows-kernel-integrations /test/new-e2e/tests/security-agent-functional @DataDog/windows-kernel-integrations @DataDog/agent-security /test/new-e2e/tests/cws @DataDog/agent-security -/test/new-e2e/tests/agent-metrics-logs @DataDog/agent-metrics-logs +/test/new-e2e/tests/agent-metrics-logs @DataDog/agent-logs /test/new-e2e/tests/windows @DataDog/windows-agent @DataDog/windows-kernel-integrations /test/new-e2e/tests/apm @DataDog/agent-apm /test/new-e2e/tests/remote-config @DataDog/remote-config @@ -638,7 +641,7 @@ /test/new-e2e/tests/gpu @Datadog/ebpf-platform /test/otel/ @DataDog/opentelemetry /test/system/ @DataDog/agent-shared-components -/test/system/dogstatsd/ @DataDog/agent-metrics-logs +/test/system/dogstatsd/ @DataDog/agent-metrics /test/benchmarks/apm_scripts/ @DataDog/agent-apm /test/regression/ @DataDog/single-machine-performance @@ -648,10 +651,10 @@ /tools/gdb/ @DataDog/agent-shared-components /tools/go-update/ @DataDog/agent-shared-components /tools/NamedPipeCmd/ @DataDog/windows-kernel-integrations -/tools/retry_file_dump/ @DataDog/agent-metrics-logs +/tools/retry_file_dump/ @DataDog/agent-metrics /tools/windows/ @DataDog/windows-agent /tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl @DataDog/windows-agent @DataDog/documentation -/tools/agent_QA/ @DataDog/agent-metrics-logs +/tools/agent_QA/ @DataDog/agent-metrics @DataDog/agent-logs /internal/tools/ @DataDog/agent-devx-loops @DataDog/agent-devx-infra /internal/third_party/client-go @DataDog/container-platform diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS index 5d86e1317e8b90..4db304d41900fa 100644 --- a/.gitlab/JOBOWNERS +++ b/.gitlab/JOBOWNERS @@ -14,7 +14,7 @@ build_clang_* @DataDog/ebpf-platform build_processed_btfhub_archive @DataDog/ebpf-platform # Deps fetch -fetch_openjdk @DataDog/agent-metrics-logs +fetch_openjdk @DataDog/agent-metrics # Source test # Notifications are handled separately for more fine-grained control on 
go tests diff --git a/comp/README.md b/comp/README.md index 3df8351f3b755c..045e8ee9bdd9b9 100644 --- a/comp/README.md +++ b/comp/README.md @@ -26,13 +26,13 @@ Package expvarserver contains the component type for the expVar server. ### [comp/agent/jmxlogger](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/agent/jmxlogger) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-metrics Package jmxlogger implements the logger for JMX. ## [comp/aggregator](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/aggregator) (Component Bundle) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-metrics Package aggregator implements the "aggregator" bundle, @@ -68,7 +68,7 @@ auth_token file but can fetch it it's available. ## [comp/checks](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/checks) (Component Bundle) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-metrics Package checks implements the "checks" bundle, for all of the component based agent checks @@ -92,7 +92,7 @@ Package winregistry implements the Windows Registry check ## [comp/collector](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/collector) (Component Bundle) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-metrics Package collector defines the collector bundle. 
@@ -201,7 +201,7 @@ Package workloadmeta provides the workloadmeta component for the Datadog Agent ## [comp/dogstatsd](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/dogstatsd) (Component Bundle) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-metrics @@ -231,7 +231,7 @@ Package status implements the core status component information provider interfa ## [comp/forwarder](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/forwarder) (Component Bundle) -*Datadog Team*: agent-processing-and-routing +*Datadog Team*: agent-metrics Package forwarder implements the "forwarder" bundle @@ -241,14 +241,20 @@ Package defaultforwarder implements a component to send payloads to the backend ### [comp/forwarder/eventplatform](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/forwarder/eventplatform) +*Datadog Team*: agent-logs + Package eventplatform contains the logic for forwarding events to the event platform ### [comp/forwarder/eventplatformreceiver](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver) +*Datadog Team*: agent-logs + Package eventplatformreceiver implements the receiver for the event platform package ### [comp/forwarder/orchestrator](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/forwarder/orchestrator) +*Datadog Team*: agent-logs + Package orchestrator implements the orchestrator forwarder component. ### [comp/forwarder/orchestrator/orchestratorinterface](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface) @@ -257,7 +263,7 @@ Package orchestratorinterface defines the interface for the orchestrator forward ## [comp/logs](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/logs) (Component Bundle) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-logs @@ -617,13 +623,13 @@ Package rdnsquerier provides the reverse DNS querier component. 
### [comp/serializer/logscompression](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/serializer/logscompression) -*Datadog Team*: agent-processing-and-routing +*Datadog Team*: agent-logs Package logscompression provides the component for logs compression ### [comp/serializer/metricscompression](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/serializer/metricscompression) -*Datadog Team*: agent-processing-and-routing +*Datadog Team*: agent-metrics Package metricscompression provides the component for metrics compression diff --git a/comp/agent/jmxlogger/component.go b/comp/agent/jmxlogger/component.go index 93303b819ede6c..4d800ab0e7b932 100644 --- a/comp/agent/jmxlogger/component.go +++ b/comp/agent/jmxlogger/component.go @@ -6,7 +6,7 @@ // Package jmxlogger implements the logger for JMX. package jmxlogger -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. type Component interface { diff --git a/comp/aggregator/bundle.go b/comp/aggregator/bundle.go index 3539e731085cca..014444a5a99383 100644 --- a/comp/aggregator/bundle.go +++ b/comp/aggregator/bundle.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-metrics-logs +// team: agent-metrics // Bundle defines the fx options for this bundle. func Bundle(params demultiplexerimpl.Params) fxutil.BundleOptions { diff --git a/comp/aggregator/demultiplexer/component.go b/comp/aggregator/demultiplexer/component.go index 17cdca40960c47..52ccb66a1e16d7 100644 --- a/comp/aggregator/demultiplexer/component.go +++ b/comp/aggregator/demultiplexer/component.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer" ) -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. 
type Component interface { diff --git a/comp/aggregator/demultiplexerendpoint/def/component.go b/comp/aggregator/demultiplexerendpoint/def/component.go index 7288ff23bbfb57..182819bbbd78c8 100644 --- a/comp/aggregator/demultiplexerendpoint/def/component.go +++ b/comp/aggregator/demultiplexerendpoint/def/component.go @@ -6,7 +6,7 @@ // Package demultiplexerendpoint component provides the /dogstatsd-contexts-dump API endpoint that can register via Fx value groups. package demultiplexerendpoint -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. type Component interface { diff --git a/comp/checks/bundle.go b/comp/checks/bundle.go index ff1ea1db88a22a..a7fe71d75bf566 100644 --- a/comp/checks/bundle.go +++ b/comp/checks/bundle.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-metrics-logs +// team: agent-metrics // Bundle defines the fx options for this bundle. func Bundle() fxutil.BundleOptions { diff --git a/comp/collector/bundle.go b/comp/collector/bundle.go index aab4afeb0f17f9..0071e37dd69a09 100644 --- a/comp/collector/bundle.go +++ b/comp/collector/bundle.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-metrics-logs +// team: agent-metrics // Bundle defines the fx options for this bundle. 
func Bundle() fxutil.BundleOptions { diff --git a/comp/collector/collector/component.go b/comp/collector/collector/component.go index 2752c9d0dd5f64..9f2400e67a4242 100644 --- a/comp/collector/collector/component.go +++ b/comp/collector/collector/component.go @@ -14,7 +14,7 @@ import ( "go.uber.org/fx" ) -// team: agent-metrics-logs +// team: agent-metrics // EventType represents the type of events emitted by the collector type EventType uint32 diff --git a/comp/dogstatsd/bundle.go b/comp/dogstatsd/bundle.go index 39b078206409c9..be3a08cde19f8a 100644 --- a/comp/dogstatsd/bundle.go +++ b/comp/dogstatsd/bundle.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-metrics-logs +// team: agent-metrics // Bundle defines the fx options for this bundle. func Bundle(params server.Params) fxutil.BundleOptions { diff --git a/comp/dogstatsd/pidmap/component.go b/comp/dogstatsd/pidmap/component.go index 8112a6f639e0b6..2e32cbaaa5dbbd 100644 --- a/comp/dogstatsd/pidmap/component.go +++ b/comp/dogstatsd/pidmap/component.go @@ -6,7 +6,7 @@ // Package pidmap implements a component for tracking pid and containerID relations package pidmap -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. type Component interface { diff --git a/comp/dogstatsd/replay/def/component.go b/comp/dogstatsd/replay/def/component.go index a7e4bb7ec0d09e..18e969dc8539bb 100644 --- a/comp/dogstatsd/replay/def/component.go +++ b/comp/dogstatsd/replay/def/component.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" ) -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. 
type Component interface { diff --git a/comp/dogstatsd/replay/fx/fx.go b/comp/dogstatsd/replay/fx/fx.go index 79ae36a4306e78..667d2afdd8ec3d 100644 --- a/comp/dogstatsd/replay/fx/fx.go +++ b/comp/dogstatsd/replay/fx/fx.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-metrics-logs +// team: agent-metrics // Module defines the fx options for this component. func Module() fxutil.Module { diff --git a/comp/dogstatsd/server/component.go b/comp/dogstatsd/server/component.go index 6871739c87c196..c223a3a7d51f26 100644 --- a/comp/dogstatsd/server/component.go +++ b/comp/dogstatsd/server/component.go @@ -13,7 +13,7 @@ import ( "go.uber.org/fx" ) -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. type Component interface { diff --git a/comp/dogstatsd/server/serverless.go b/comp/dogstatsd/server/serverless.go index ba4bb5e9f6ad5b..a04034c5426c6c 100644 --- a/comp/dogstatsd/server/serverless.go +++ b/comp/dogstatsd/server/serverless.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/option" ) -// team: agent-metrics-logs +// team: agent-metrics // ServerlessDogstatsd is the interface for the serverless dogstatsd server. type ServerlessDogstatsd interface { diff --git a/comp/dogstatsd/serverDebug/component.go b/comp/dogstatsd/serverDebug/component.go index 8a08f1c038d28a..90935378bab55d 100644 --- a/comp/dogstatsd/serverDebug/component.go +++ b/comp/dogstatsd/serverDebug/component.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" ) -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. 
type Component interface { diff --git a/comp/dogstatsd/statsd/component.go b/comp/dogstatsd/statsd/component.go index 8c4ba9837604f8..f306d8aa7302a6 100644 --- a/comp/dogstatsd/statsd/component.go +++ b/comp/dogstatsd/statsd/component.go @@ -10,7 +10,7 @@ import ( ddgostatsd "github.com/DataDog/datadog-go/v5/statsd" ) -// team: agent-metrics-logs +// team: agent-metrics // Component is the component type. type Component interface { diff --git a/comp/dogstatsd/status/component.go b/comp/dogstatsd/status/component.go index b5c14b3b9d7e93..12796891592f24 100644 --- a/comp/dogstatsd/status/component.go +++ b/comp/dogstatsd/status/component.go @@ -6,7 +6,7 @@ // Package status implements the core status component information provider interface package status -// team: agent-metrics-logs +// team: agent-metrics // Component is the status interface. type Component interface { diff --git a/comp/forwarder/bundle.go b/comp/forwarder/bundle.go index c7b7c8c64a9347..5ba4dff6940b9e 100644 --- a/comp/forwarder/bundle.go +++ b/comp/forwarder/bundle.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-processing-and-routing +// team: agent-metrics // Bundle defines the fx options for this bundle. func Bundle(params defaultforwarder.Params) fxutil.BundleOptions { diff --git a/comp/forwarder/defaultforwarder/component.go b/comp/forwarder/defaultforwarder/component.go index fbf381a2fbfb05..36e6503c5ee8db 100644 --- a/comp/forwarder/defaultforwarder/component.go +++ b/comp/forwarder/defaultforwarder/component.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-processing-and-routing +// team: agent-metrics // Component is the component type. 
type Component interface { diff --git a/comp/forwarder/eventplatform/component.go b/comp/forwarder/eventplatform/component.go index 65bb1c26d4b21a..ed8fbae72bfe63 100644 --- a/comp/forwarder/eventplatform/component.go +++ b/comp/forwarder/eventplatform/component.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" ) -// team: agent-processing-and-routing +// team: agent-logs const ( // EventTypeNetworkDevicesMetadata is the event type for network devices metadata diff --git a/comp/forwarder/eventplatformreceiver/component.go b/comp/forwarder/eventplatformreceiver/component.go index 8970b0e6e87e1a..1eb8a154868c0a 100644 --- a/comp/forwarder/eventplatformreceiver/component.go +++ b/comp/forwarder/eventplatformreceiver/component.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" ) -// team: agent-processing-and-routing +// team: agent-logs // Component is the component type. type Component interface { diff --git a/comp/forwarder/orchestrator/component.go b/comp/forwarder/orchestrator/component.go index f97c62e0a74a48..ea5720847f8a8d 100644 --- a/comp/forwarder/orchestrator/component.go +++ b/comp/forwarder/orchestrator/component.go @@ -8,7 +8,7 @@ package orchestrator import "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface" -// team: agent-processing-and-routing +// team: agent-logs // Component is the alias of orchestratorinterface.Component. 
type Component = orchestratorinterface.Component diff --git a/comp/forwarder/orchestrator/orchestratorinterface/component.go b/comp/forwarder/orchestrator/orchestratorinterface/component.go index 22ec1b32ec0585..47b48290bbf4e7 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/component.go +++ b/comp/forwarder/orchestrator/orchestratorinterface/component.go @@ -8,7 +8,7 @@ package orchestratorinterface import "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" -// team: agent-processing-and-routing +// team: agent-metrics // Component is the component type. // The main method of this component is `Get` which returns the forwarder instance only if it enabled. diff --git a/comp/logs/adscheduler/component.go b/comp/logs/adscheduler/component.go index c04779b5a11d84..2c66a8747306cb 100644 --- a/comp/logs/adscheduler/component.go +++ b/comp/logs/adscheduler/component.go @@ -6,7 +6,7 @@ // Package adscheduler is glue code to connect autodiscovery to the logs agent. It receives and filters events and converts them into log sources. package adscheduler -// team: agent-metrics-logs +// team: agent-logs // Component is the component type. type Component interface{} diff --git a/comp/logs/agent/component.go b/comp/logs/agent/component.go index cf034fb1580e95..4c45c5fe3bcd72 100644 --- a/comp/logs/agent/component.go +++ b/comp/logs/agent/component.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/sources" ) -// team: agent-metrics-logs +// team: agent-logs // Component is the component type. type Component interface { diff --git a/comp/logs/bundle.go b/comp/logs/bundle.go index 007b75f2dbe79f..4085b222afd000 100644 --- a/comp/logs/bundle.go +++ b/comp/logs/bundle.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-metrics-logs +// team: agent-logs // Bundle defines the fx options for this bundle. 
func Bundle() fxutil.BundleOptions { diff --git a/comp/logs/integrations/def/component.go b/comp/logs/integrations/def/component.go index 52cae199cab7a2..5147faaace93f1 100644 --- a/comp/logs/integrations/def/component.go +++ b/comp/logs/integrations/def/component.go @@ -15,7 +15,7 @@ package integrations import "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" -// team: agent-metrics-logs +// team: agent-logs // Component is the component type. type Component interface { diff --git a/comp/serializer/logscompression/def/component.go b/comp/serializer/logscompression/def/component.go index 5f959aea6ffb03..cde19c3df2eaeb 100644 --- a/comp/serializer/logscompression/def/component.go +++ b/comp/serializer/logscompression/def/component.go @@ -6,7 +6,7 @@ // Package logscompression provides the component for logs compression package logscompression -// team: agent-processing-and-routing +// team: agent-logs import ( "github.com/DataDog/datadog-agent/pkg/util/compression" diff --git a/comp/serializer/metricscompression/def/component.go b/comp/serializer/metricscompression/def/component.go index 9c1e6a03fa0c5c..786bf91f2a2cdb 100644 --- a/comp/serializer/metricscompression/def/component.go +++ b/comp/serializer/metricscompression/def/component.go @@ -6,7 +6,7 @@ // Package metricscompression provides the component for metrics compression package metricscompression -// team: agent-processing-and-routing +// team: agent-metrics import ( "github.com/DataDog/datadog-agent/pkg/util/compression" diff --git a/tasks/libs/issue/model/constants.py b/tasks/libs/issue/model/constants.py index 986d201bcf1386..b7335aa901d293 100644 --- a/tasks/libs/issue/model/constants.py +++ b/tasks/libs/issue/model/constants.py @@ -28,6 +28,8 @@ 'agent-shared-components', 'agent-integrations', 'agent-metrics-logs', + 'agent-metrics', + 'agent-logs', 'platform-integrations', 'agent-ci-experience', 'asm-go', diff --git a/tasks/libs/pipeline/github_jira_map.yaml 
b/tasks/libs/pipeline/github_jira_map.yaml index 8376b49f9c16d5..ba89ba7de8dbd7 100644 --- a/tasks/libs/pipeline/github_jira_map.yaml +++ b/tasks/libs/pipeline/github_jira_map.yaml @@ -13,6 +13,8 @@ '@datadog/ndm-integrations': NDINT '@datadog/container-intake': CTK '@datadog/agent-metrics-logs': AMLII +'@datadog/agent-metrics': AGTMETRICS +'@datadog/agent-logs': AGNTLOG '@datadog/agent-shared-components': ASCII '@datadog/container-app': CAP '@datadog/metrics-aggregation': AGGR diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index b47b9b1436ac2d..73e7fe4df4d86b 100644 --- a/tasks/libs/pipeline/github_slack_map.yaml +++ b/tasks/libs/pipeline/github_slack_map.yaml @@ -15,6 +15,8 @@ '@datadog/ndm-integrations': '#ndm-integrations' '@datadog/container-intake': '#process-agent-ops' '@datadog/agent-metrics-logs': '#agent-metrics-logs' +'@datadog/agent-metrics': '#agent-metrics' +'@datadog/agent-logs': '#agent-logs' '@datadog/agent-processing-and-routing': '#agent-processing-and-routing' '@datadog/agent-shared-components': '#agent-shared-components-ops' '@datadog/container-app': '#container-app' diff --git a/tasks/libs/pipeline/github_slack_review_map.yaml b/tasks/libs/pipeline/github_slack_review_map.yaml index a84715feeaec6a..59ddec09a8a4e0 100644 --- a/tasks/libs/pipeline/github_slack_review_map.yaml +++ b/tasks/libs/pipeline/github_slack_review_map.yaml @@ -16,6 +16,8 @@ '@datadog/ndm-integrations': '#ndm-integrations' '@datadog/container-intake': '#process-agent-ops' '@datadog/agent-metrics-logs': '#agent-metrics-logs' +'@datadog/agent-metrics': '#agent-metrics' +'@datadog/agent-logs': '#agent-logs' '@datadog/agent-processing-and-routing': '#agent-processing-and-routing' '@datadog/agent-shared-components': '#agent-shared-components' '@datadog/container-app': '#container-app' From 33f058895e7251f617ee1521cef84ee7b430bd5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Wed, 29 Jan 2025 11:20:02 
+0100 Subject: [PATCH 38/97] [EBPF] gpu: automatically configure cgroups for NVIDIA permissions (#32504) --- cmd/system-probe/modules/gpu.go | 24 ++++++ pkg/config/setup/system_probe.go | 1 + pkg/gpu/cgroups.go | 122 +++++++++++++++++++++++++++++++ pkg/gpu/config/config.go | 3 + 4 files changed, 150 insertions(+) create mode 100644 pkg/gpu/cgroups.go diff --git a/cmd/system-probe/modules/gpu.go b/cmd/system-probe/modules/gpu.go index c7dbac874dccd4..375b9fb7c222e6 100644 --- a/cmd/system-probe/modules/gpu.go +++ b/cmd/system-probe/modules/gpu.go @@ -10,6 +10,7 @@ package modules import ( "fmt" "net/http" + "os" "time" "github.com/NVIDIA/go-nvml/pkg/nvml" @@ -19,6 +20,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" + "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/eventmonitor" "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers" "github.com/DataDog/datadog-agent/pkg/gpu" @@ -49,6 +51,15 @@ var GPUMonitoring = module.Factory{ } c := gpuconfig.New() + + if c.ConfigureCgroupPerms { + log.Info("Configuring GPU device cgroup permissions for system-probe") + err := gpu.ConfigureDeviceCgroups(uint32(os.Getpid()), hostRoot()) + if err != nil { + log.Warnf("Failed to configure device cgroups for process: %v, gpu-monitoring module might not work properly", err) + } + } + probeDeps := gpu.ProbeDependencies{ Telemetry: deps.Telemetry, //if the config parameter doesn't exist or is empty string, the default value is used as defined in go-nvml library @@ -123,3 +134,16 @@ func createGPUProcessEventConsumer(evm *eventmonitor.EventMonitor) error { return nil } + +func hostRoot() string { + envHostRoot := os.Getenv("HOST_ROOT") + if envHostRoot != "" { + return envHostRoot + } + + if env.IsContainerized() { + return "/host" + } + + return "/" +} diff --git 
a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index d3326fbbe3fe6f..629c6da2d0126b 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -428,6 +428,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnv(join(gpuNS, "nvml_lib_path")) cfg.BindEnvAndSetDefault(join(gpuNS, "process_scan_interval_seconds"), 5) cfg.BindEnvAndSetDefault(join(gpuNS, "initial_process_sync"), true) + cfg.BindEnvAndSetDefault(join(gpuNS, "configure_cgroup_perms"), false) initCWSSystemProbeConfig(cfg) } diff --git a/pkg/gpu/cgroups.go b/pkg/gpu/cgroups.go new file mode 100644 index 00000000000000..f2dcf10c89234d --- /dev/null +++ b/pkg/gpu/cgroups.go @@ -0,0 +1,122 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package gpu + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// ConfigureDeviceCgroups configures the cgroups for a process to allow access to the NVIDIA character devices +func ConfigureDeviceCgroups(pid uint32, rootfs string) error { + cgroups, err := utils.GetProcControlGroups(pid, pid) + if err != nil { + return fmt.Errorf("failed to get cgroups for pid %d: %w", pid, err) + } + + if len(cgroups) == 0 { + return fmt.Errorf("no cgroups found for pid %d", pid) + } + + // Each cgroup is for a different subsystem, we only want the cgroup ID + // and we can extract that from any cgroup + cgroup := cgroups[0] + + // Configure systemd device allow first, so that in case of a reload we get the correct permissions + // The containerID for systemd is the last part of the cgroup path + systemdContainerID := filepath.Base(string(cgroup.Path)) + if err := 
configureDeviceAllow(systemdContainerID, rootfs, systemdDev); err != nil { + return fmt.Errorf("failed to configure systemd device allow for container %s: %w", systemdContainerID, err) + } + + // Configure cgroup device allow + if err := configureDeviceAllow(string(cgroup.Path), rootfs, cgroupDev); err != nil { + return fmt.Errorf("failed to configure cgroup device allow for container %s: %w", cgroup.Path, err) + } + + return nil +} + +const ( + systemdDeviceAllowFile = "50-DeviceAllow.conf" + systemdDeviceAllowDir = "run/systemd/transient" + cgroupDeviceAllowFile = "devices.allow" + cgroupDeviceAllowDir = "sys/fs/cgroup/devices" + nvidiaDeviceAllow = "DeviceAllow=char-nvidia rwm\n" // Allow access to the NVIDIA character devices + nvidiaCgroupAllow = "c 195:* rwm\n" // 195 is the major number for the NVIDIA character devices +) + +type deviceType string + +const ( + systemdDev deviceType = "systemd" + cgroupDev deviceType = "cgroup" +) + +func configureDeviceAllow(containerID, rootfs string, devType deviceType) error { + var deviceAllowPath string + var err error + var allowString string + + switch devType { + case systemdDev: + deviceAllowPath, err = buildSafePath(rootfs, systemdDeviceAllowDir, containerID+".d", systemdDeviceAllowFile) + allowString = nvidiaDeviceAllow + case cgroupDev: + deviceAllowPath, err = buildSafePath(rootfs, cgroupDeviceAllowDir, containerID, cgroupDeviceAllowFile) + allowString = nvidiaCgroupAllow + default: + return fmt.Errorf("unknown device type: %s", devType) + } + + if err != nil { + return fmt.Errorf("failed to build path for %s: %w", devType, err) + } + + log.Debugf("configuring %s device allow for container %s: %s", devType, containerID, deviceAllowPath) + + deviceAllowFile, err := os.OpenFile(deviceAllowPath, os.O_APPEND|os.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("failed to open %s: %w", deviceAllowPath, err) + } + defer deviceAllowFile.Close() + + _, err = deviceAllowFile.WriteString(allowString) + if err != nil { + 
return fmt.Errorf("failed to write to %s: %w", deviceAllowPath, err) + } + + return nil +} + +// buildSafePath builds a safe path from the rootfs and basedir, and appends the +// parts to it. It assumes that rootfs and basedir are already validated paths, +// and check that the parts being added to the path do not cause the final path +// to escape the rootfs/basedir. +func buildSafePath(rootfs string, basedir string, parts ...string) (string, error) { + rootfs = strings.TrimSuffix(rootfs, "/") // Remove trailing slashes from rootfs + basedir = strings.TrimPrefix(basedir, "/") // Remove leading slashes from basedir + + // that way we can now join the paths using Sprintf to build the base directory + root := fmt.Sprintf("%s/%s", rootfs, basedir) + + // Join the parts to the base directory and create a full path. Note that this will also remove any ".." from the path + fullPath := filepath.Join(append([]string{root}, parts...)...) + + // Check that the resulting path is a child of root and that we haven't escaped the rootfs/basedir + if !strings.HasPrefix(fullPath, root) { + return "", fmt.Errorf("invalid path %s, should be a child of %s", fullPath, root) + } + + return fullPath, nil +} diff --git a/pkg/gpu/config/config.go b/pkg/gpu/config/config.go index 4323ad6630a860..0e3c6de2dbc903 100644 --- a/pkg/gpu/config/config.go +++ b/pkg/gpu/config/config.go @@ -32,6 +32,8 @@ type Config struct { InitialProcessSync bool // NVMLLibraryPath is the path of the native libnvidia-ml.so library NVMLLibraryPath string + // ConfigureCgroupPerms indicates whether the probe should configure cgroup permissions for GPU monitoring + ConfigureCgroupPerms bool } // New generates a new configuration for the GPU monitoring probe. 
@@ -43,5 +45,6 @@ func New() *Config { InitialProcessSync: spCfg.GetBool(sysconfig.FullKeyPath(GPUNS, "initial_process_sync")), NVMLLibraryPath: spCfg.GetString(sysconfig.FullKeyPath(GPUNS, "nvml_lib_path")), Enabled: spCfg.GetBool(sysconfig.FullKeyPath(GPUNS, "enabled")), + ConfigureCgroupPerms: spCfg.GetBool(sysconfig.FullKeyPath(GPUNS, "configure_cgroup_perms")), } } From 2ff6956a5f9e20f243fe566e0caa93b48ad30374 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hugo=20Beauz=C3=A9e-Luyssen?= Date: Wed, 29 Jan 2025 12:05:05 +0100 Subject: [PATCH 39/97] CI: minor DCA build simplification (#33516) --- .gitlab/binary_build/cluster_agent.yml | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/.gitlab/binary_build/cluster_agent.yml b/.gitlab/binary_build/cluster_agent.yml index e7c4d6c2d1ab59..c63e70d478c3e8 100644 --- a/.gitlab/binary_build/cluster_agent.yml +++ b/.gitlab/binary_build/cluster_agent.yml @@ -1,7 +1,11 @@ --- .cluster_agent-build_common: stage: binary_build - needs: ["go_mod_tidy_check"] + rules: + !reference [.on_tag_or_a7] + needs: ["go_mod_tidy_check", "go_deps"] + before_script: + - !reference [.retrieve_linux_go_deps] script: - inv check-go-version - inv -e cluster-agent.build --release-version "$RELEASE_VERSION_7" @@ -15,24 +19,14 @@ cluster_agent-build_amd64: extends: .cluster_agent-build_common - rules: - !reference [.on_tag_or_a7] image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] - needs: ["go_mod_tidy_check", "go_deps"] variables: ARCH: amd64 - before_script: - - !reference [.retrieve_linux_go_deps] cluster_agent-build_arm64: extends: .cluster_agent-build_common - rules: - !reference [.on_tag_or_a7] image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:arm64"] - needs: ["go_mod_tidy_check", "go_deps"] variables: ARCH: arm64 - before_script: - - 
!reference [.retrieve_linux_go_deps] From 01c8fb06949e0297bfe5401b36b6ad97a4c5088a Mon Sep 17 00:00:00 2001 From: Alex Lopez Date: Wed, 29 Jan 2025 12:16:38 +0100 Subject: [PATCH 40/97] Fix pushing of tags on `release.tag-version` task (#33515) --- tasks/release.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tasks/release.py b/tasks/release.py index d0ee30f61eca9b..8a1cc3e206c980 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -234,9 +234,9 @@ def tag_version( start_qual: Will start the qualification phase for agent 6 release candidate by adding a qualification tag Examples: - $ inv -e release.tag-version 7.27.x # Create tags and push them to origin - $ inv -e release.tag-version 7.27.x --no-push # Create tags locally; don't push them - $ inv -e release.tag-version 7.29.x --force # Create tags (overwriting existing tags with the same name), force-push them to origin + $ inv -e release.tag-version -r 7.27.x # Create tags and push them to origin + $ inv -e release.tag-version -r 7.27.x --no-push # Create tags locally; don't push them + $ inv -e release.tag-version -r 7.29.x --force # Create tags (overwriting existing tags with the same name), force-push them to origin """ assert release_branch or version @@ -258,10 +258,10 @@ def tag_version( ctx, get_default_modules()["."], QUALIFICATION_TAG, commit, force_option, False ) - if push: - tags_list = ' '.join(tags) - ctx.run(f"git push origin {tags_list}{force_option}") - print(f"Pushed tag {tags_list}") + if push: + tags_list = ' '.join(tags) + ctx.run(f"git push origin {tags_list}{force_option}") + print(f"Pushed tag {tags_list}") print(f"Created tags for version {agent_version}") From d3cd712426adf52c95a01f69fbc2fa5d9f120bb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Wed, 29 Jan 2025 07:30:50 -0500 Subject: [PATCH 41/97] [ACIX-545] Fixed version deduction for release tasks (#33479) --- 
tasks/libs/releasing/version.py | 39 +++++++++++++++-- tasks/libs/types/version.py | 20 +++++++++ tasks/release.py | 15 +++---- tasks/unit_tests/version_tests.py | 73 ++++++++++++++++++++++++++++++- 4 files changed, 132 insertions(+), 15 deletions(-) diff --git a/tasks/libs/releasing/version.py b/tasks/libs/releasing/version.py index 762601fb4194d5..82fff5615e95b7 100644 --- a/tasks/libs/releasing/version.py +++ b/tasks/libs/releasing/version.py @@ -78,8 +78,8 @@ def current_version(ctx, major_version) -> Version: return _create_version_from_match(VERSION_RE.search(get_version(ctx, major_version=major_version, release=True))) -def next_final_version(ctx, major_version, patch_version) -> Version: - previous_version = current_version(ctx, major_version) +def next_final_version(ctx, release_branch, patch_version) -> Version: + previous_version = current_version_for_release_branch(ctx, release_branch) # Set the new version if previous_version.is_devel(): @@ -98,9 +98,40 @@ def next_final_version(ctx, major_version, patch_version) -> Version: return previous_version.next_version(bump_minor=True, rc=False) -def next_rc_version(ctx, major_version, patch_version=False) -> Version: +def current_version_for_release_branch(ctx, release_branch) -> Version: + """Finds the latest version of a release branch from tags. + + Note that this will take into account only full release or RC tags ignoring devel tags / tags with a prefix. + + Examples: + For release_branch = '7.63.x'. + - If there are ['7.63.0-rc.1', '7.63.0-rc.2'] tags, returns Version(7, 63, 0, rc=2). + - If there are ['7.63.0-rc.1', '7.63.0'] tags, returns Version(7, 63, 0). + - If there are ['7.63.0', '7.63.1-rc.1'] tags, returns Version(7, 63, 1, rc=1). + - If there are ['7.63.0', '7.63.1-rc.1', '7.63.1'] tags, returns Version(7, 63, 1). 
+ """ + + RE_RELEASE_BRANCH = re.compile(r'(\d+)\.(\d+)\.x') + match = RE_RELEASE_BRANCH.match(release_branch) + assert match, f"Invalid release branch name: {release_branch} (should be X.YY.x)" + + # Get all the versions for this release X.YY + cmd = rf"git tag | grep -E '^{match.group(1)}\.{match.group(2)}\.[0-9]+(-rc\.[0-9]+)?$'" + res = ctx.run(cmd, hide=True, warn=True) + res = res.stdout.strip().split('\n') if res else [] + + # from_tag might return None, ignore those + versions = [v for v in sorted(Version.from_tag(tag) for tag in res) if v] + + if not versions: + return Version(int(match.group(1)), int(match.group(2)), 0) + + return versions[-1] + + +def next_rc_version(ctx, release_branch, patch_version=False) -> Version: # Fetch previous version from the most recent tag on the branch - previous_version = current_version(ctx, major_version) + previous_version = current_version_for_release_branch(ctx, release_branch) if previous_version.is_rc(): # We're already on an RC, only bump the RC version diff --git a/tasks/libs/types/version.py b/tasks/libs/types/version.py index 3e85d0c3b7fc5b..61e3f5e8d743cf 100644 --- a/tasks/libs/types/version.py +++ b/tasks/libs/types/version.py @@ -1,7 +1,27 @@ +import re from copy import deepcopy class Version: + @staticmethod + def from_tag(tag): + RE_VERSION = re.compile( + r"^v?(?P.*?)(?P\d+)\.(?P\d+)(\.(?P\d+))?(-rc\.(?P\d+))?(?P-devel)?$" + ) + + match = RE_VERSION.match(tag) + if not match: + return None + + return Version( + prefix=match.group("prefix"), + major=int(match.group("major")), + minor=int(match.group("minor")), + patch=int(match.group("patch")) if match.group("patch") else None, + rc=int(match.group("rc")) if match.group("rc") else None, + devel=bool(match.group("devel")), + ) + def __init__(self, major, minor, patch=None, rc=None, devel=False, prefix=""): self.prefix = prefix self.major = major diff --git a/tasks/release.py b/tasks/release.py index 8a1cc3e206c980..43ff33a6f2d244 100644 --- 
a/tasks/release.py +++ b/tasks/release.py @@ -291,7 +291,7 @@ def finish(ctx, release_branch, upstream="origin"): # find the correct new version. # To support this, we'd have to support a --patch-version param in # release.finish - new_version = next_final_version(ctx, major_version, False) + new_version = next_final_version(ctx, release_branch, False) if not yes_no_question( f'Do you want to finish the release with version {new_version}?', color="bold", default=False ): @@ -400,11 +400,11 @@ def create_rc(ctx, release_branch, patch_version=False, upstream="origin", slack # Get the version of the highest major: useful for some logging & to get # the version to use for Go submodules updates - new_highest_version = next_rc_version(ctx, major_version, patch_version) + new_highest_version = next_rc_version(ctx, release_branch, patch_version) # Get the next final version of the highest major: useful to know which # milestone to target, as well as decide which tags from dependency repositories # can be used. - new_final_version = next_final_version(ctx, major_version, patch_version) + new_final_version = next_final_version(ctx, release_branch, patch_version) print(color_message(f"Preparing RC for agent version {major_version}", "bold")) # Step 0: checks @@ -428,7 +428,7 @@ def create_rc(ctx, release_branch, patch_version=False, upstream="origin", slack # Step 1: Update release entries print(color_message("Updating release entries", "bold")) - new_version = next_rc_version(ctx, major_version, patch_version) + new_version = next_rc_version(ctx, release_branch, patch_version) update_release_json(new_version, new_final_version) @@ -520,14 +520,12 @@ def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False, st start_qual: Start the qualification phase for agent 6 release candidates. 
""" - major_version = get_version_major(release_branch) - with agent_context(ctx, release_branch): datadog_agent = get_gitlab_repo() # Get the version of the highest major: needed for tag_version and to know # which tag to target when creating the pipeline. - new_version = next_rc_version(ctx, major_version, patch_version) + new_version = next_rc_version(ctx, release_branch, patch_version) # Get a string representation of the RC, eg. "6/7.32.0-rc.1" versions_string = str(new_version) @@ -1154,8 +1152,7 @@ def check_for_changes(ctx, release_branch, warning_mode=False): Check if there was any modification on the release repositories since last release candidate. """ with agent_context(ctx, release_branch): - major_version = get_version_major(release_branch) - next_version = next_rc_version(ctx, major_version) + next_version = next_rc_version(ctx, release_branch) repo_data = generate_repo_data(ctx, warning_mode, next_version, release_branch) changes = 'false' for repo_name, repo in repo_data.items(): diff --git a/tasks/unit_tests/version_tests.py b/tasks/unit_tests/version_tests.py index 2f6255c68310c2..85eb42d9093570 100644 --- a/tasks/unit_tests/version_tests.py +++ b/tasks/unit_tests/version_tests.py @@ -1,11 +1,11 @@ import os import random import unittest -from unittest.mock import patch +from unittest.mock import MagicMock, patch from invoke import MockContext, Result -from tasks.libs.releasing.version import get_matching_pattern, query_version +from tasks.libs.releasing.version import current_version_for_release_branch, get_matching_pattern, query_version from tasks.libs.types.version import Version @@ -337,3 +337,72 @@ def test_on_branch(self): c = MockContext(run={}) self.assertEqual(get_matching_pattern(c, major_version="42", release=False), r"42\.*") c.run.assert_not_called() + + +class TestFromTag(unittest.TestCase): + def test_tags_standard(self): + tag = "7.62.1" + expected = Version(7, 62, 1) + + v = Version.from_tag(tag) + self.assertEqual(v, expected) 
+ + def test_tags_prefix(self): + tag = "pref-7.62.1" + expected = Version(7, 62, 1, prefix='pref-') + + v = Version.from_tag(tag) + self.assertEqual(v, expected) + + def test_tags_rc(self): + tag = "6.53.0-rc.10" + expected = Version(6, 53, 0, rc=10) + + v = Version.from_tag(tag) + self.assertEqual(v, expected) + + def test_tags_devel(self): + tag = "7.64.0-devel" + expected = Version(7, 64, 0, devel=True) + + v = Version.from_tag(tag) + self.assertEqual(v, expected) + + +class TestCurrentVersionForReleaseBranch(unittest.TestCase): + def test_simple(self): + ctx = MagicMock() + ctx.run.return_value.stdout = "7.63.0-rc.1\n7.63.0" + version = current_version_for_release_branch(ctx, '7.63.x') + + self.assertEqual(version, Version(7, 63, 0)) + + def test_rc_version(self): + ctx = MagicMock() + ctx.run.return_value.stdout = "7.63.0-rc.1\n7.63.0-rc.2" + version = current_version_for_release_branch(ctx, '7.63.x') + self.assertEqual(version, Version(7, 63, 0, rc=2)) + + def test_rc_version_sorted(self): + ctx = MagicMock() + ctx.run.return_value.stdout = "7.63.0-rc.2\n7.63.0-rc.1" + version = current_version_for_release_branch(ctx, '7.63.x') + self.assertEqual(version, Version(7, 63, 0, rc=2)) + + def test_rc_version_sorted_hard(self): + ctx = MagicMock() + ctx.run.return_value.stdout = "7.63.0-rc.10\n7.63.0-rc.2" + version = current_version_for_release_branch(ctx, '7.63.x') + self.assertEqual(version, Version(7, 63, 0, rc=10)) + + def test_next_rc_version(self): + ctx = MagicMock() + ctx.run.return_value.stdout = "7.63.0\n7.63.1-rc.1" + version = current_version_for_release_branch(ctx, '7.63.x') + self.assertEqual(version, Version(7, 63, 1, rc=1)) + + def test_next_release_version(self): + ctx = MagicMock() + ctx.run.return_value.stdout = "7.63.0\n7.63.1-rc.1\n7.63.1" + version = current_version_for_release_branch(ctx, '7.63.x') + self.assertEqual(version, Version(7, 63, 1)) From d040bbfaca6b48d2b35ea8edd007de7f1a2b68e1 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Wed, 29 Jan 2025 14:30:36 +0100 Subject: [PATCH 42/97] [EBPF] kmt: add dhcpd_leases to kmt flare (#33495) --- tasks/kernel_matrix_testing/kmt_os.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tasks/kernel_matrix_testing/kmt_os.py b/tasks/kernel_matrix_testing/kmt_os.py index 67cea92538a65a..f49b6b8db89558 100644 --- a/tasks/kernel_matrix_testing/kmt_os.py +++ b/tasks/kernel_matrix_testing/kmt_os.py @@ -225,6 +225,7 @@ def flare(ctx: Context, flare_folder: Path): ctx.run(f"brew list {' '.join(MacOS.packages)} > {flare_folder / 'brew_libvirt.txt'}", warn=True) ctx.run(f"netstat -an > {flare_folder / 'netstat.txt'}", warn=True) ctx.run(f"ifconfig -a > {flare_folder / 'ifconfig.txt'}", warn=True) + ctx.run(f"cp -v /var/db/dhcpd_leases {flare_folder / 'dhcpd_leases'}", warn=True) def flare(ctx: Context, tmp_flare_folder: Path, dest_folder: Path, keep_uncompressed_files: bool = False): From 65e6e9f6d7525174652c48188389cd5445361262 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Wed, 29 Jan 2025 14:30:43 +0100 Subject: [PATCH 43/97] [EBPF] kmt: avoid cleanup job failures on canceled pipelines (#33525) --- .gitlab/kernel_matrix_testing/common.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index 27d5f339cfcd6e..e90b049d0f3a30 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -218,7 +218,7 @@ # setup_env job hasn't finished. This causes instances to be leftover for more time than necessary. 
- inv kmt.wait-for-setup-job --pipeline-id $CI_PIPELINE_ID --arch $ARCH --component $TEST_COMPONENT - aws ec2 describe-instances --filters $FILTER_TEAM $FILTER_MANAGED $FILTER_PIPELINE $FILTER_ARCH $FILTER_INSTANCE_TYPE $FILTER_TEST_COMPONENT --output json --query $QUERY_INSTANCE_IDS | tee -a instance.json - - cat instance.json | jq -r 'map(.[]) | .[]' | grep -v "null" | xargs -n 1 -t aws ec2 terminate-instances --instance-ids + - cat instance.json | jq -r 'map(.[]) | .[]' | grep -v "null" | xargs --no-run-if-empty -n 1 -t aws ec2 terminate-instances --instance-ids after_script: - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_API_KEY_ORG2 token) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] From 7de71e7eb9dfed9529ebf0bed519e26a363e94bb Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Wed, 29 Jan 2025 15:31:53 +0100 Subject: [PATCH 44/97] trivy: when languages are disabled, also disable individual packages (#33531) --- pkg/util/trivy/trivy.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index fbde3f2a19de98..ca61aed040ebb1 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -110,6 +110,7 @@ func DefaultDisabledCollectors(enabledAnalyzers []string) []analyzer.Type { } if analyzersDisabled(LanguagesAnalyzers) { disabledAnalyzers = append(disabledAnalyzers, analyzer.TypeLanguages...) + disabledAnalyzers = append(disabledAnalyzers, analyzer.TypeIndividualPkgs...) 
} if analyzersDisabled(SecretAnalyzers) { disabledAnalyzers = append(disabledAnalyzers, analyzer.TypeSecret) From 0b1e55627c4476098c3015c8afef5ee0598b3dad Mon Sep 17 00:00:00 2001 From: Stan Rozenraukh Date: Wed, 29 Jan 2025 09:45:04 -0500 Subject: [PATCH 45/97] feat(language_detection): add support for PHP (#33484) Co-authored-by: Luca Abbati Co-authored-by: Bob Weinand --- pkg/languagedetection/detector.go | 49 +++++++++++-------- pkg/languagedetection/detector_nix_test.go | 12 +++++ .../languagemodels/language.go | 23 ++++++--- ...nguage-detection-php-762044749e007343.yaml | 11 +++++ 4 files changed, 68 insertions(+), 27 deletions(-) create mode 100644 releasenotes/notes/language-detection-php-762044749e007343.yaml diff --git a/pkg/languagedetection/detector.go b/pkg/languagedetection/detector.go index 50fbc7c19ffb56..1f23a14a6240fc 100644 --- a/pkg/languagedetection/detector.go +++ b/pkg/languagedetection/detector.go @@ -36,34 +36,43 @@ type languageFromCLI struct { validator func(exe string) bool } -// rubyPattern is a regexp validator for the ruby prefix -var rubyPattern = regexp.MustCompile(`^ruby\d+\.\d+$`) +var ( + rubyPattern = regexp.MustCompile(`^ruby\d+\.\d+$`) + phpPattern = regexp.MustCompile(`^php(?:-fpm)?\d(?:\.\d)?$`) +) + +func matchesRubyPrefix(exe string) bool { + return rubyPattern.MatchString(exe) +} + +func matchesJavaPrefix(exe string) bool { + return exe != "javac" +} + +func matchesPHPPrefix(exe string) bool { + return phpPattern.MatchString(exe) +} // knownPrefixes maps languages names to their prefix var knownPrefixes = map[string]languageFromCLI{ "python": {name: languagemodels.Python}, - "java": {name: languagemodels.Java, validator: func(exe string) bool { - return exe != "javac" - }}, - "ruby": {name: languagemodels.Ruby, validator: func(exe string) bool { - return rubyPattern.MatchString(exe) - }}, + "java": {name: languagemodels.Java, validator: matchesJavaPrefix}, + "ruby": {name: languagemodels.Ruby, validator: 
matchesRubyPrefix}, + "php": {name: languagemodels.PHP, validator: matchesPHPPrefix}, } // exactMatches maps an exact exe name match to a prefix var exactMatches = map[string]languageFromCLI{ - "py": {name: languagemodels.Python}, - "python": {name: languagemodels.Python}, - - "java": {name: languagemodels.Java}, - - "npm": {name: languagemodels.Node}, - "node": {name: languagemodels.Node}, - - "dotnet": {name: languagemodels.Dotnet}, - - "ruby": {name: languagemodels.Ruby}, - "rubyw": {name: languagemodels.Ruby}, + "py": {name: languagemodels.Python}, + "python": {name: languagemodels.Python}, + "java": {name: languagemodels.Java}, + "npm": {name: languagemodels.Node}, + "node": {name: languagemodels.Node}, + "dotnet": {name: languagemodels.Dotnet}, + "ruby": {name: languagemodels.Ruby}, + "rubyw": {name: languagemodels.Ruby}, + "php": {name: languagemodels.PHP}, + "php-fpm": {name: languagemodels.PHP}, } // languageNameFromCmdline returns a process's language from its command. diff --git a/pkg/languagedetection/detector_nix_test.go b/pkg/languagedetection/detector_nix_test.go index b38bb324dea522..b9ab1bebb97df3 100644 --- a/pkg/languagedetection/detector_nix_test.go +++ b/pkg/languagedetection/detector_nix_test.go @@ -112,6 +112,18 @@ func TestDetectLanguage(t *testing.T) { comm: "java", expected: languagemodels.Ruby, }, + { + name: "php", + cmdline: []string{"php", "index.php"}, + comm: "php", + expected: languagemodels.PHP, + }, + { + name: "php5", + cmdline: []string{"php5", "index.php"}, + comm: "php5", + expected: languagemodels.PHP, + }, } { t.Run(tc.name, func(t *testing.T) { process := []languagemodels.Process{makeProcess(tc.cmdline, tc.comm)} diff --git a/pkg/languagedetection/languagemodels/language.go b/pkg/languagedetection/languagemodels/language.go index 70b0acf1c5fc7d..6c2b81d0fee964 100644 --- a/pkg/languagedetection/languagemodels/language.go +++ b/pkg/languagedetection/languagemodels/language.go @@ -9,19 +9,28 @@ package languagemodels type 
LanguageName string const ( - //nolint:revive // TODO(PROC) Fix revive linter + // Go language name. Go LanguageName = "go" - //nolint:revive // TODO(PROC) Fix revive linter + + // Node language name. Node LanguageName = "node" - //nolint:revive // TODO(PROC) Fix revive linter + + // Dotnet language name. Dotnet LanguageName = "dotnet" - //nolint:revive // TODO(PROC) Fix revive linter + + // Python language name. Python LanguageName = "python" - //nolint:revive // TODO(PROC) Fix revive linter + + // Java language name. Java LanguageName = "java" - //nolint:revive // TODO(PROC) Fix revive linter + + // Ruby language name. Ruby LanguageName = "ruby" - //nolint:revive // TODO(PROC) Fix revive linter + + // PHP language name. + PHP LanguageName = "php" + + // Unknown language name. Unknown LanguageName = "" ) diff --git a/releasenotes/notes/language-detection-php-762044749e007343.yaml b/releasenotes/notes/language-detection-php-762044749e007343.yaml new file mode 100644 index 00000000000000..4539922e4a3ed2 --- /dev/null +++ b/releasenotes/notes/language-detection-php-762044749e007343.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Language detection adds support for detecting PHP. 
From d51bfeb4ee92686197d1a0ce731609d54e061297 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Momar=20TOUR=C3=89?= <36661127+mftoure@users.noreply.github.com> Date: Wed, 29 Jan 2025 15:49:15 +0100 Subject: [PATCH 46/97] [CWS] Snapshot and replay bind events (#33265) --- pkg/security/probe/probe_ebpf.go | 37 ++++++ .../resolvers/process/resolver_ebpf.go | 11 +- pkg/security/resolvers/resolvers_ebpf.go | 23 ++++ pkg/security/secl/model/model_unix.go | 9 ++ .../activity_tree/process_node_snapshot.go | 78 +------------ pkg/security/utils/snapshot_bound_sockets.go | 107 ++++++++++++++++++ 6 files changed, 189 insertions(+), 76 deletions(-) create mode 100644 pkg/security/utils/snapshot_bound_sockets.go diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index cd91c574b88531..7e0cd1f28d8c82 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -13,6 +13,7 @@ import ( "errors" "fmt" "math" + "net" "os" "path/filepath" "runtime" @@ -582,7 +583,18 @@ func (p *EBPFProbe) playSnapshot(notifyConsumers bool) { } events = append(events, event) + + snapshotBoundSockets, ok := p.Resolvers.ProcessResolver.SnapshottedBoundSockets[event.ProcessContext.Pid] + if ok { + for _, s := range snapshotBoundSockets { + entry.Retain() + bindEvent := p.newBindEventFromSnapshot(entry, s) + events = append(events, bindEvent) + } + } + } + p.Resolvers.ProcessResolver.Walk(entryToEvent) for _, event := range events { p.DispatchEvent(event, notifyConsumers) @@ -2688,3 +2700,28 @@ func (p *EBPFProbe) newEBPFPooledEventFromPCE(entry *model.ProcessCacheEntry) *m return event } + +// newBindEventFromSnapshot returns a new bind event with a process context +func (p *EBPFProbe) newBindEventFromSnapshot(entry *model.ProcessCacheEntry, snapshottedBind model.SnapshottedBoundSocket) *model.Event { + + event := p.eventPool.Get() + event.TimestampRaw = uint64(time.Now().UnixNano()) + event.Type = uint32(model.BindEventType) + event.ProcessCacheEntry = 
entry + event.ProcessContext = &entry.ProcessContext + event.ProcessContext.Process.ContainerID = entry.ContainerID + event.ProcessContext.Process.CGroup = entry.CGroup + + event.Bind.SyscallEvent.Retval = 0 + event.Bind.AddrFamily = snapshottedBind.Family + event.Bind.Addr.IPNet.IP = snapshottedBind.IP + event.Bind.Protocol = snapshottedBind.Protocol + if snapshottedBind.Family == unix.AF_INET { + event.Bind.Addr.IPNet.Mask = net.CIDRMask(32, 32) + } else { + event.Bind.Addr.IPNet.Mask = net.CIDRMask(128, 128) + } + event.Bind.Addr.Port = snapshottedBind.Port + + return event +} diff --git a/pkg/security/resolvers/process/resolver_ebpf.go b/pkg/security/resolvers/process/resolver_ebpf.go index d6900406286595..64d1027fe7c9e9 100644 --- a/pkg/security/resolvers/process/resolver_ebpf.go +++ b/pkg/security/resolvers/process/resolver_ebpf.go @@ -99,8 +99,9 @@ type EBPFResolver struct { brokenLineage *atomic.Int64 inodeErrStats *atomic.Int64 - entryCache map[uint32]*model.ProcessCacheEntry - argsEnvsCache *simplelru.LRU[uint64, *argsEnvsCacheEntry] + entryCache map[uint32]*model.ProcessCacheEntry + SnapshottedBoundSockets map[uint32][]model.SnapshottedBoundSocket + argsEnvsCache *simplelru.LRU[uint64, *argsEnvsCacheEntry] processCacheEntryPool *Pool @@ -1270,6 +1271,11 @@ func (p *EBPFResolver) cacheFlush(ctx context.Context) { } } +// SyncBoundSockets sets the bound sockets discovered during the snapshot +func (p *EBPFResolver) SyncBoundSockets(pid uint32, boundSockets []model.SnapshottedBoundSocket) { + p.SnapshottedBoundSockets[pid] = boundSockets +} + // SyncCache snapshots /proc for the provided pid. 
func (p *EBPFResolver) SyncCache(proc *process.Process) { // Only a R lock is necessary to check if the entry exists, but if it exists, we'll update it, so a RW lock is @@ -1535,6 +1541,7 @@ func NewEBPFResolver(manager *manager.Manager, config *config.Config, statsdClie statsdClient: statsdClient, scrubber: scrubber, entryCache: make(map[uint32]*model.ProcessCacheEntry), + SnapshottedBoundSockets: make(map[uint32][]model.SnapshottedBoundSocket), opts: *opts, argsEnvsCache: argsEnvsCache, state: atomic.NewInt64(Snapshotting), diff --git a/pkg/security/resolvers/resolvers_ebpf.go b/pkg/security/resolvers/resolvers_ebpf.go index 9b9dce457043e8..61c24551473348 100644 --- a/pkg/security/resolvers/resolvers_ebpf.go +++ b/pkg/security/resolvers/resolvers_ebpf.go @@ -255,6 +255,11 @@ func (r *EBPFResolvers) Snapshot() error { return err } + // snapshot sockets + if err := r.snapshotBoundSockets(); err != nil { + return fmt.Errorf("unable to snapshot bound sockets: %w", err) + } + return nil } @@ -318,6 +323,24 @@ func (r *EBPFResolvers) snapshot() error { return nil } +func (r *EBPFResolvers) snapshotBoundSockets() error { + processes, err := utils.GetProcesses() + if err != nil { + return err + } + + for _, proc := range processes { + bs, err := utils.GetBoundSockets(proc) + if err != nil { + log.Debugf("sockets snapshot failed for (pid: %v): %s", proc.Pid, err) + continue + } + r.ProcessResolver.SyncBoundSockets(uint32(proc.Pid), bs) + } + + return nil +} + // Close cleans up any underlying resolver that requires a cleanup func (r *EBPFResolvers) Close() error { // clean up the handles in netns resolver diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index b06a59b389414a..458a0b36cb3cb8 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -11,6 +11,7 @@ package model import ( + "net" "net/netip" "time" @@ -752,6 +753,14 @@ type LoginUIDWriteEvent struct { AUID uint32 `field:"-"` } 
+// SnapshottedBoundSocket represents a snapshotted bound socket +type SnapshottedBoundSocket struct { + IP net.IP + Port uint16 + Family uint16 + Protocol uint16 +} + // RawPacketEvent represents a packet event type RawPacketEvent struct { NetworkContext diff --git a/pkg/security/security_profile/activity_tree/process_node_snapshot.go b/pkg/security/security_profile/activity_tree/process_node_snapshot.go index 506ec5e6b62a21..6e374e1af1c77a 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot.go @@ -10,7 +10,6 @@ package activitytree import ( "bufio" - "fmt" "math/rand" "net" "os" @@ -22,7 +21,6 @@ import ( "syscall" "time" - "github.com/prometheus/procfs" "github.com/shirou/gopsutil/v4/process" "golang.org/x/sys/unix" @@ -268,84 +266,16 @@ func extractPathFromSmapsLine(line []byte) (string, bool) { } func (pn *ProcessNode) snapshotBoundSockets(p *process.Process, stats *Stats, newEvent func() *model.Event) { - // list all the file descriptors opened by the process - FDs, err := p.OpenFiles() + boundSockets, err := utils.GetBoundSockets(p) if err != nil { - seclog.Warnf("error while listing files (pid: %v): %s", p.Pid, err) + seclog.Warnf("error while listing sockets (pid: %v): %s", p.Pid, err) return } - // sockets have the following pattern "socket:[inode]" - var sockets []uint64 - for _, fd := range FDs { - if strings.HasPrefix(fd.Path, "socket:[") { - sock, err := strconv.Atoi(strings.TrimPrefix(fd.Path[:len(fd.Path)-1], "socket:[")) - if err != nil { - seclog.Warnf("error while parsing socket inode (pid: %v): %s", p.Pid, err) - continue - } - if sock < 0 { - continue - } - sockets = append(sockets, uint64(sock)) - } - } - if len(sockets) <= 0 { - return + for _, socket := range boundSockets { + pn.insertSnapshottedSocket(socket.Family, socket.IP, socket.Protocol, socket.Port, stats, newEvent) } - // use /proc/[pid]/net/tcp,tcp6,udp,udp6 to extract the ports 
opened by the current process - proc, _ := procfs.NewFS(filepath.Join(kernel.HostProc(fmt.Sprintf("%d", p.Pid)))) - if err != nil { - seclog.Warnf("error while opening procfs (pid: %v): %s", p.Pid, err) - } - // looking for AF_INET sockets - TCP, err := proc.NetTCP() - if err != nil { - seclog.Debugf("couldn't snapshot TCP sockets: %v", err) - } - UDP, err := proc.NetUDP() - if err != nil { - seclog.Debugf("couldn't snapshot UDP sockets: %v", err) - } - // looking for AF_INET6 sockets - TCP6, err := proc.NetTCP6() - if err != nil { - seclog.Debugf("couldn't snapshot TCP6 sockets: %v", err) - } - UDP6, err := proc.NetUDP6() - if err != nil { - seclog.Debugf("couldn't snapshot UDP6 sockets: %v", err) - } - - // searching for socket inode - for _, s := range sockets { - for _, sock := range TCP { - if sock.Inode == s { - pn.insertSnapshottedSocket(unix.AF_INET, sock.LocalAddr, unix.IPPROTO_TCP, uint16(sock.LocalPort), stats, newEvent) - break - } - } - for _, sock := range UDP { - if sock.Inode == s { - pn.insertSnapshottedSocket(unix.AF_INET, sock.LocalAddr, unix.IPPROTO_UDP, uint16(sock.LocalPort), stats, newEvent) - break - } - } - for _, sock := range TCP6 { - if sock.Inode == s { - pn.insertSnapshottedSocket(unix.AF_INET6, sock.LocalAddr, unix.IPPROTO_TCP, uint16(sock.LocalPort), stats, newEvent) - break - } - } - for _, sock := range UDP6 { - if sock.Inode == s { - pn.insertSnapshottedSocket(unix.AF_INET6, sock.LocalAddr, unix.IPPROTO_UDP, uint16(sock.LocalPort), stats, newEvent) - break - } - } - // not necessary found here, can be also another kind of socket (AF_UNIX, AF_NETLINK, etc) - } } func (pn *ProcessNode) insertSnapshottedSocket(family uint16, ip net.IP, protocol uint16, port uint16, stats *Stats, newEvent func() *model.Event) { diff --git a/pkg/security/utils/snapshot_bound_sockets.go b/pkg/security/utils/snapshot_bound_sockets.go new file mode 100644 index 00000000000000..6b01dd5c731e92 --- /dev/null +++ b/pkg/security/utils/snapshot_bound_sockets.go 
@@ -0,0 +1,107 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package utils holds utils related files +package utils + +import ( + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/seclog" + "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/prometheus/procfs" + "github.com/shirou/gopsutil/v4/process" + "golang.org/x/sys/unix" +) + +// GetBoundSockets returns the list of bound sockets for a given process +func GetBoundSockets(p *process.Process) ([]model.SnapshottedBoundSocket, error) { + + boundSockets := []model.SnapshottedBoundSocket{} + + // list all the file descriptors opened by the process + FDs, err := p.OpenFiles() + if err != nil { + seclog.Warnf("error while listing files (pid: %v): %s", p.Pid, err) + return nil, err + } + + // sockets have the following pattern "socket:[inode]" + var sockets []uint64 + for _, fd := range FDs { + if strings.HasPrefix(fd.Path, "socket:[") { + sock, err := strconv.Atoi(strings.TrimPrefix(fd.Path[:len(fd.Path)-1], "socket:[")) + if err != nil { + seclog.Warnf("error while parsing socket inode (pid: %v): %s", p.Pid, err) + continue + } + if sock < 0 { + continue + } + sockets = append(sockets, uint64(sock)) + } + } + + // use /proc/[pid]/net/tcp,tcp6,udp,udp6 to extract the ports opened by the current process + proc, _ := procfs.NewFS(filepath.Join(kernel.HostProc(fmt.Sprintf("%d", p.Pid)))) + if err != nil { + seclog.Warnf("error while opening procfs (pid: %v): %s", p.Pid, err) + } + // looking for AF_INET sockets + TCP, err := proc.NetTCP() + if err != nil { + seclog.Debugf("couldn't snapshot TCP sockets: %v", err) + } + UDP, err := proc.NetUDP() + if err != nil { + 
seclog.Debugf("couldn't snapshot UDP sockets: %v", err) + } + // looking for AF_INET6 sockets + TCP6, err := proc.NetTCP6() + if err != nil { + seclog.Debugf("couldn't snapshot TCP6 sockets: %v", err) + } + UDP6, err := proc.NetUDP6() + if err != nil { + seclog.Debugf("couldn't snapshot UDP6 sockets: %v", err) + } + + // searching for socket inode + for _, s := range sockets { + for _, sock := range TCP { + if sock.Inode == s { + boundSockets = append(boundSockets, model.SnapshottedBoundSocket{IP: sock.LocalAddr, Port: uint16(sock.LocalPort), Family: unix.AF_INET, Protocol: unix.IPPROTO_TCP}) + break + } + } + for _, sock := range UDP { + if sock.Inode == s { + boundSockets = append(boundSockets, model.SnapshottedBoundSocket{IP: sock.LocalAddr, Port: uint16(sock.LocalPort), Family: unix.AF_INET, Protocol: unix.IPPROTO_UDP}) + break + } + } + for _, sock := range TCP6 { + if sock.Inode == s { + boundSockets = append(boundSockets, model.SnapshottedBoundSocket{IP: sock.LocalAddr, Port: uint16(sock.LocalPort), Family: unix.AF_INET6, Protocol: unix.IPPROTO_TCP}) + break + } + } + for _, sock := range UDP6 { + if sock.Inode == s { + boundSockets = append(boundSockets, model.SnapshottedBoundSocket{IP: sock.LocalAddr, Port: uint16(sock.LocalPort), Family: unix.AF_INET6, Protocol: unix.IPPROTO_UDP}) + break + } + } + // not necessary found here, can be also another kind of socket (AF_UNIX, AF_NETLINK, etc) + } + + return boundSockets, nil +} From 249960fed8b82088f8cdcac43d74d710545709cb Mon Sep 17 00:00:00 2001 From: Guillaume Pagnoux Date: Wed, 29 Jan 2025 16:21:47 +0100 Subject: [PATCH 47/97] discovery: move process scanning to system-probe (#32128) --- .../corechecks/servicediscovery/events.go | 69 +-- .../servicediscovery/events_test.go | 92 ++-- .../corechecks/servicediscovery/impl_linux.go | 120 +---- .../servicediscovery/impl_linux_test.go | 70 +-- .../servicediscovery/model/model.go | 7 +- .../servicediscovery/module/comm_test.go | 23 +- 
.../servicediscovery/module/config_test.go | 12 +- .../module/ignore_proc_test.go | 2 +- .../servicediscovery/module/impl_linux.go | 361 ++++++++++---- .../module/impl_linux_test.go | 457 ++++++++++++------ .../servicediscovery/servicediscovery.go | 37 +- .../servicediscovery/servicediscovery_mock.go | 6 +- test/new-e2e/tests/discovery/linux_test.go | 2 +- 13 files changed, 745 insertions(+), 513 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go index f1d040dacefcb4..aef9a54c361ac9 100644 --- a/pkg/collector/corechecks/servicediscovery/events.go +++ b/pkg/collector/corechecks/servicediscovery/events.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -61,14 +62,14 @@ type telemetrySender struct { hostname hostname.Component } -func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { +func (ts *telemetrySender) newEvent(t eventType, service model.Service) *event { host := ts.hostname.GetSafe(context.Background()) env := pkgconfigsetup.Datadog().GetString("env") nameSource := "" - if svc.service.DDService != "" { + if service.DDService != "" { nameSource = "provided" - if svc.service.DDServiceInjected { + if service.DDServiceInjected { nameSource = "injected" } } @@ -78,27 +79,27 @@ func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { APIVersion: "v2", Payload: &eventPayload{ NamingSchemaVersion: "1", - ServiceName: svc.meta.Name, - GeneratedServiceName: svc.service.GeneratedName, - GeneratedServiceNameSource: svc.service.GeneratedNameSource, - ContainerServiceName: 
svc.service.ContainerServiceName, - ContainerServiceNameSource: svc.service.ContainerServiceNameSource, - DDService: svc.service.DDService, + ServiceName: service.Name, + GeneratedServiceName: service.GeneratedName, + GeneratedServiceNameSource: service.GeneratedNameSource, + ContainerServiceName: service.ContainerServiceName, + ContainerServiceNameSource: service.ContainerServiceNameSource, + DDService: service.DDService, HostName: host, Env: env, - ServiceLanguage: svc.meta.Language, - ServiceType: svc.meta.Type, - StartTime: int64(svc.service.StartTimeMilli / 1000), - StartTimeMilli: int64(svc.service.StartTimeMilli), - LastSeen: svc.LastHeartbeat.Unix(), - APMInstrumentation: svc.meta.APMInstrumentation, + ServiceLanguage: service.Language, + ServiceType: service.Type, + StartTime: int64(service.StartTimeMilli / 1000), + StartTimeMilli: int64(service.StartTimeMilli), + LastSeen: service.LastHeartbeat, + APMInstrumentation: service.APMInstrumentation, ServiceNameSource: nameSource, - Ports: svc.service.Ports, - PID: svc.service.PID, - CommandLine: svc.service.CommandLine, - RSSMemory: svc.service.RSS, - CPUCores: svc.service.CPUCores, - ContainerID: svc.service.ContainerID, + Ports: service.Ports, + PID: service.PID, + CommandLine: service.CommandLine, + RSSMemory: service.RSS, + CPUCores: service.CPUCores, + ContainerID: service.ContainerID, }, } } @@ -110,14 +111,14 @@ func newTelemetrySender(sender sender.Sender) *telemetrySender { } } -func (ts *telemetrySender) sendStartServiceEvent(svc serviceInfo) { +func (ts *telemetrySender) sendStartServiceEvent(service model.Service) { log.Debugf("[pid: %d | name: %s | ports: %v] start-service", - svc.service.PID, - svc.meta.Name, - svc.service.Ports, + service.PID, + service.Name, + service.Ports, ) - e := ts.newEvent(eventTypeStartService, svc) + e := ts.newEvent(eventTypeStartService, service) b, err := json.Marshal(e) if err != nil { log.Errorf("failed to encode start-service event as json: %v", err) @@ -127,13 
+128,13 @@ func (ts *telemetrySender) sendStartServiceEvent(svc serviceInfo) { ts.sender.EventPlatformEvent(b, eventplatform.EventTypeServiceDiscovery) } -func (ts *telemetrySender) sendHeartbeatServiceEvent(svc serviceInfo) { +func (ts *telemetrySender) sendHeartbeatServiceEvent(service model.Service) { log.Debugf("[pid: %d | name: %s] heartbeat-service", - svc.service.PID, - svc.meta.Name, + service.PID, + service.Name, ) - e := ts.newEvent(eventTypeHeartbeatService, svc) + e := ts.newEvent(eventTypeHeartbeatService, service) b, err := json.Marshal(e) if err != nil { log.Errorf("failed to encode heartbeat-service event as json: %v", err) @@ -143,13 +144,13 @@ func (ts *telemetrySender) sendHeartbeatServiceEvent(svc serviceInfo) { ts.sender.EventPlatformEvent(b, eventplatform.EventTypeServiceDiscovery) } -func (ts *telemetrySender) sendEndServiceEvent(svc serviceInfo) { +func (ts *telemetrySender) sendEndServiceEvent(service model.Service) { log.Debugf("[pid: %d | name: %s] end-service", - svc.service.PID, - svc.meta.Name, + service.PID, + service.Name, ) - e := ts.newEvent(eventTypeEndService, svc) + e := ts.newEvent(eventTypeEndService, service) b, err := json.Marshal(e) if err != nil { log.Errorf("failed to encode end-service event as json: %v", err) diff --git a/pkg/collector/corechecks/servicediscovery/events_test.go b/pkg/collector/corechecks/servicediscovery/events_test.go index b38dc5ddf084fe..81d322ffdf2782 100644 --- a/pkg/collector/corechecks/servicediscovery/events_test.go +++ b/pkg/collector/corechecks/servicediscovery/events_test.go @@ -44,9 +44,7 @@ func Test_telemetrySender(t *testing.T) { mSender := mocksender.NewMockSender("test-servicediscovery") mSender.SetupAcceptAll() - mTimer := NewMocktimer(ctrl) now := time.Date(2024, 5, 13, 0, 0, 0, 0, time.UTC) - mTimer.EXPECT().Now().Return(now).AnyTimes() host := "test-host" _, mHostname := hostnameinterface.NewMock(hostnameinterface.MockHostname(host)) @@ -54,34 +52,30 @@ func Test_telemetrySender(t 
*testing.T) { ts := newTelemetrySender(mSender) ts.hostname = mHostname - svc := serviceInfo{ - service: model.Service{ - PID: 99, - CommandLine: []string{"test-service", "--args"}, - Ports: []uint16{80, 8080}, - StartTimeMilli: uint64(now.Add(-20 * time.Minute).UnixMilli()), - RSS: 500 * 1024 * 1024, - GeneratedName: "generated-name", - GeneratedNameSource: "generated-name-source", - ContainerServiceName: "container-service-name", - ContainerServiceNameSource: "service", - DDService: "dd-service", - DDServiceInjected: true, - CPUCores: 1.5, - ContainerID: "abcd", - }, - meta: ServiceMetadata{ - Name: "test-service", - Language: "jvm", - Type: "web_service", - APMInstrumentation: "injected", - }, - LastHeartbeat: now, + service := model.Service{ + PID: 99, + Name: "test-service", + GeneratedName: "generated-name", + GeneratedNameSource: "generated-name-source", + ContainerServiceName: "container-service-name", + ContainerServiceNameSource: "service", + DDService: "dd-service", + DDServiceInjected: true, + Ports: []uint16{80, 8080}, + APMInstrumentation: "injected", + Language: "jvm", + Type: "web_service", + RSS: 500 * 1024 * 1024, + CommandLine: []string{"test-service", "--args"}, + StartTimeMilli: uint64(now.Add(-20 * time.Minute).UnixMilli()), + CPUCores: 1.5, + ContainerID: "abcd", + LastHeartbeat: now.Unix(), } - ts.sendStartServiceEvent(svc) - ts.sendHeartbeatServiceEvent(svc) - ts.sendEndServiceEvent(svc) + ts.sendStartServiceEvent(service) + ts.sendHeartbeatServiceEvent(service) + ts.sendEndServiceEvent(service) wantEvents := []*event{ { @@ -184,9 +178,7 @@ func Test_telemetrySender_name_provided(t *testing.T) { mSender := mocksender.NewMockSender("test-servicediscovery") mSender.SetupAcceptAll() - mTimer := NewMocktimer(ctrl) now := time.Date(2024, 5, 13, 0, 0, 0, 0, time.UTC) - mTimer.EXPECT().Now().Return(now).AnyTimes() host := "test-host" _, mHostname := hostnameinterface.NewMock(hostnameinterface.MockHostname(host)) @@ -194,30 +186,26 @@ func 
Test_telemetrySender_name_provided(t *testing.T) { ts := newTelemetrySender(mSender) ts.hostname = mHostname - svc := serviceInfo{ - service: model.Service{ - PID: 55, - CommandLine: []string{"foo", "--option"}, - StartTimeMilli: uint64(now.Add(-20 * time.Minute).UnixMilli()), - GeneratedName: "generated-name2", - GeneratedNameSource: "generated-name-source2", - ContainerServiceName: "container-service-name2", - ContainerServiceNameSource: "service", - DDService: "dd-service-provided", - ContainerID: "abcd", - }, - meta: ServiceMetadata{ - Name: "test-service", - Language: "jvm", - Type: "web_service", - APMInstrumentation: "injected", - }, - LastHeartbeat: now, + service := model.Service{ + PID: 55, + Name: "test-service", + GeneratedName: "generated-name2", + GeneratedNameSource: "generated-name-source2", + ContainerServiceName: "container-service-name2", + ContainerServiceNameSource: "service", + DDService: "dd-service-provided", + APMInstrumentation: "injected", + Language: "jvm", + Type: "web_service", + CommandLine: []string{"foo", "--option"}, + StartTimeMilli: uint64(now.Add(-20 * time.Minute).UnixMilli()), + ContainerID: "abcd", + LastHeartbeat: now.Unix(), } - ts.sendStartServiceEvent(svc) - ts.sendHeartbeatServiceEvent(svc) - ts.sendEndServiceEvent(svc) + ts.sendStartServiceEvent(service) + ts.sendHeartbeatServiceEvent(service) + ts.sendEndServiceEvent(service) wantEvents := []*event{ { diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index a03cbd6f1ae391..7feb9dafaf9791 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -11,14 +11,11 @@ import ( "encoding/json" "fmt" "net/http" - "time" sysprobeclient "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/servicetype" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util/log" ) //go:generate mockgen -source=$GOFILE -package=$GOPACKAGE -destination=impl_linux_mock.go @@ -29,20 +26,12 @@ func init() { type linuxImpl struct { getDiscoveryServices func(client *http.Client) (*model.ServicesResponse, error) - time timer - - aliveServices map[int]*serviceInfo - potentialServices map[int]*serviceInfo - - sysProbeClient *http.Client + sysProbeClient *http.Client } func newLinuxImpl() (osImpl, error) { return &linuxImpl{ getDiscoveryServices: getDiscoveryServices, - time: realTime{}, - aliveServices: make(map[int]*serviceInfo), - potentialServices: make(map[int]*serviceInfo), sysProbeClient: sysprobeclient.Get(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")), }, nil } @@ -71,109 +60,6 @@ func getDiscoveryServices(client *http.Client) (*model.ServicesResponse, error) return res, nil } -func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { - response, err := li.getDiscoveryServices(li.sysProbeClient) - if err != nil { - return nil, err - } - - // The endpoint could be refactored in the future to return a map to avoid this. - serviceMap := make(map[int]*model.Service, len(response.Services)) - for _, service := range response.Services { - serviceMap[service.PID] = &service - } - - events := serviceEvents{} - now := li.time.Now() - - li.handlePotentialServices(&events, now, serviceMap) - - // check open ports - these will be potential new services if they are still alive in the next iteration. 
- for _, service := range response.Services { - pid := service.PID - if _, ok := li.aliveServices[pid]; !ok { - log.Debugf("[pid: %d] found new process with open ports", pid) - - svc := li.getServiceInfo(service) - log.Debugf("[pid: %d] adding process to potential: %s", pid, svc.meta.Name) - li.potentialServices[pid] = &svc - } - } - - // check if services previously marked as alive still are. - for pid, svc := range li.aliveServices { - if service, ok := serviceMap[pid]; !ok { - delete(li.aliveServices, pid) - events.stop = append(events.stop, *svc) - } else if now.Sub(svc.LastHeartbeat).Truncate(time.Minute) >= heartbeatTime { - svc.LastHeartbeat = now - svc.service.RSS = service.RSS - svc.service.CPUCores = service.CPUCores - svc.service.ContainerID = service.ContainerID - svc.service.GeneratedName = service.GeneratedName - svc.service.ContainerServiceName = service.ContainerServiceName - svc.service.ContainerServiceNameSource = service.ContainerServiceNameSource - svc.service.Name = service.Name - svc.meta.Name = service.Name - events.heartbeat = append(events.heartbeat, *svc) - } - } - - return &discoveredServices{ - potentials: li.potentialServices, - runningServices: li.aliveServices, - events: events, - }, nil -} - -// handlePotentialServices checks cached potential services we have seen in the -// previous call of the check. If they are still alive, start events are sent -// for these services. -func (li *linuxImpl) handlePotentialServices(events *serviceEvents, now time.Time, serviceMap map[int]*model.Service) { - if len(li.potentialServices) == 0 { - return - } - - // potentialServices contains processes that we scanned in the previous - // iteration and had open ports. We check if they are still alive in this - // iteration, and if so, we send a start-service telemetry event. 
- for pid, svc := range li.potentialServices { - if service, ok := serviceMap[pid]; ok { - svc.LastHeartbeat = now - svc.service.RSS = service.RSS - svc.service.CPUCores = service.CPUCores - svc.service.ContainerID = service.ContainerID - svc.service.GeneratedName = service.GeneratedName - svc.service.ContainerServiceName = service.ContainerServiceName - svc.service.ContainerServiceNameSource = service.ContainerServiceNameSource - svc.service.Name = service.Name - svc.meta.Name = service.Name - - li.aliveServices[pid] = svc - events.start = append(events.start, *svc) - } - } - clear(li.potentialServices) -} - -func (li *linuxImpl) getServiceInfo(service model.Service) serviceInfo { - // if the process name is docker-proxy, we should talk to docker to get the process command line and env vars - // have to see how far this can go but not for the initial release - - // for now, docker-proxy is going on the ignore list - - serviceType := servicetype.Detect(service.Ports) - - meta := ServiceMetadata{ - Name: service.Name, - Language: service.Language, - Type: string(serviceType), - APMInstrumentation: service.APMInstrumentation, - } - - return serviceInfo{ - meta: meta, - service: service, - LastHeartbeat: li.time.Now(), - } +func (li *linuxImpl) DiscoverServices() (*model.ServicesResponse, error) { + return li.getDiscoveryServices(li.sysProbeClient) } diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go index 762a9a35d2b84f..f3a9b7668291bc 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go @@ -71,6 +71,7 @@ var ( DDServiceInjected: true, Ports: []uint16{8080}, APMInstrumentation: string(apm.None), + Type: "web_service", RSS: 100 * 1024 * 1024, CPUCores: 1.5, CommandLine: []string{"test-service-1"}, @@ -88,6 +89,7 @@ var ( DDServiceInjected: true, Ports: []uint16{8080}, APMInstrumentation: 
string(apm.None), + Type: "web_service", RSS: 200 * 1024 * 1024, CPUCores: 1.5, CommandLine: []string{"test-service-1"}, @@ -103,6 +105,7 @@ var ( ContainerServiceNameSource: "app", Language: "python", Ports: []uint16{5000}, + Type: "web_service", CommandLine: pythonCommandLine, StartTimeMilli: procLaunchedMilli, ContainerID: dummyContainerID, @@ -115,6 +118,7 @@ var ( ContainerServiceName: "test-service-1-container", ContainerServiceNameSource: "service", Ports: []uint16{5432}, + Type: "db", CommandLine: []string{"test-service-1"}, StartTimeMilli: procLaunchedMilli, ContainerID: dummyContainerID, @@ -172,31 +176,24 @@ func Test_linuxImpl(t *testing.T) { name: "basic", checkRun: []*checkRun{ { - servicesResp: &model.ServicesResponse{Services: []model.Service{ + servicesResp: &model.ServicesResponse{StartedServices: []model.Service{ portTCP5000, portTCP8080, }}, time: calcTime(0), }, { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP5000, - portTCP8080, - }}, - time: calcTime(1 * time.Minute), - }, - { - servicesResp: &model.ServicesResponse{Services: []model.Service{ + servicesResp: &model.ServicesResponse{HeartbeatServices: []model.Service{ portTCP5000, portTCP8080UpdatedRSS, }}, time: calcTime(20 * time.Minute), }, { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP5000, + servicesResp: &model.ServicesResponse{StoppedServices: []model.Service{ + portTCP8080UpdatedRSS, }}, - time: calcTime(21 * time.Minute), + time: calcTime(20 * time.Minute), }, }, wantEvents: []*event{ @@ -217,7 +214,7 @@ func Test_linuxImpl(t *testing.T) { Env: "", StartTime: calcTime(0).Unix(), StartTimeMilli: calcTime(0).UnixMilli(), - LastSeen: calcTime(1 * time.Minute).Unix(), + LastSeen: calcTime(0).Unix(), Ports: []uint16{8080}, PID: 99, CommandLine: []string{"test-service-1"}, @@ -296,7 +293,7 @@ func Test_linuxImpl(t *testing.T) { Env: "", StartTime: calcTime(0).Unix(), StartTimeMilli: calcTime(0).UnixMilli(), - LastSeen: calcTime(1 
* time.Minute).Unix(), + LastSeen: calcTime(0).Unix(), Ports: []uint16{5000}, PID: 500, ServiceLanguage: "python", @@ -333,32 +330,25 @@ func Test_linuxImpl(t *testing.T) { name: "repeated_service_name", checkRun: []*checkRun{ { - servicesResp: &model.ServicesResponse{Services: []model.Service{ + servicesResp: &model.ServicesResponse{StartedServices: []model.Service{ portTCP8080, portTCP5432, }}, time: calcTime(0), }, { - servicesResp: &model.ServicesResponse{Services: []model.Service{ + servicesResp: &model.ServicesResponse{HeartbeatServices: []model.Service{ portTCP8080, portTCP5432, }}, - time: calcTime(1 * time.Minute), + time: calcTime(20 * time.Minute), }, { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP8080, + servicesResp: &model.ServicesResponse{StoppedServices: []model.Service{ portTCP5432, }}, time: calcTime(20 * time.Minute), }, - { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP8080, - }}, - time: calcTime(21 * time.Minute), - }, }, wantEvents: []*event{ { @@ -376,7 +366,7 @@ func Test_linuxImpl(t *testing.T) { Env: "", StartTime: calcTime(0).Unix(), StartTimeMilli: calcTime(0).UnixMilli(), - LastSeen: calcTime(1 * time.Minute).Unix(), + LastSeen: calcTime(0).Unix(), Ports: []uint16{5432}, PID: 101, CommandLine: []string{"test-service-1"}, @@ -400,7 +390,7 @@ func Test_linuxImpl(t *testing.T) { Env: "", StartTime: calcTime(0).Unix(), StartTimeMilli: calcTime(0).UnixMilli(), - LastSeen: calcTime(1 * time.Minute).Unix(), + LastSeen: calcTime(0).Unix(), Ports: []uint16{8080}, PID: 99, CommandLine: []string{"test-service-1"}, @@ -485,6 +475,29 @@ func Test_linuxImpl(t *testing.T) { }, } + makeServiceResponseWithTime := func(responseTime time.Time, resp *model.ServicesResponse) *model.ServicesResponse { + respWithTime := &model.ServicesResponse{ + StartedServices: make([]model.Service, 0, len(resp.StartedServices)), + StoppedServices: make([]model.Service, 0, len(resp.StoppedServices)), + 
HeartbeatServices: make([]model.Service, 0, len(resp.HeartbeatServices)), + } + + for _, service := range resp.StartedServices { + service.LastHeartbeat = responseTime.Unix() + respWithTime.StartedServices = append(respWithTime.StartedServices, service) + } + for _, service := range resp.StoppedServices { + service.LastHeartbeat = responseTime.Unix() + respWithTime.StoppedServices = append(respWithTime.StoppedServices, service) + } + for _, service := range resp.HeartbeatServices { + service.LastHeartbeat = responseTime.Unix() + respWithTime.HeartbeatServices = append(respWithTime.HeartbeatServices, service) + } + + return respWithTime + } + for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctrl := gomock.NewController(t) @@ -514,9 +527,8 @@ func Test_linuxImpl(t *testing.T) { // set mocks check.os.(*linuxImpl).getDiscoveryServices = func(_ *http.Client) (*model.ServicesResponse, error) { - return cr.servicesResp, nil + return makeServiceResponseWithTime(cr.time, cr.servicesResp), nil } - check.os.(*linuxImpl).time = mTimer check.sender.hostname = mHostname err = check.Run() diff --git a/pkg/collector/corechecks/servicediscovery/model/model.go b/pkg/collector/corechecks/servicediscovery/model/model.go index 9a99681fe6cfcf..1e9e4e0e6a54f1 100644 --- a/pkg/collector/corechecks/servicediscovery/model/model.go +++ b/pkg/collector/corechecks/servicediscovery/model/model.go @@ -20,14 +20,19 @@ type Service struct { Ports []uint16 `json:"ports"` APMInstrumentation string `json:"apm_instrumentation"` Language string `json:"language"` + Type string `json:"service_type"` RSS uint64 `json:"rss"` CommandLine []string `json:"cmdline"` StartTimeMilli uint64 `json:"start_time"` CPUCores float64 `json:"cpu_cores"` ContainerID string `json:"container_id"` + LastHeartbeat int64 `json:"last_heartbeat"` } // ServicesResponse is the response for the system-probe /discovery/services endpoint. 
type ServicesResponse struct { - Services []Service `json:"services"` + StartedServices []Service `json:"started_services"` + StoppedServices []Service `json:"stopped_services"` + HeartbeatServices []Service `json:"heartbeat_services"` + RunningServicesCount int `json:"running_services_count"` } diff --git a/pkg/collector/corechecks/servicediscovery/module/comm_test.go b/pkg/collector/corechecks/servicediscovery/module/comm_test.go index c8f5dc7cc77656..074e705314d3d5 100644 --- a/pkg/collector/corechecks/servicediscovery/module/comm_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/comm_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" httptestutil "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" ) @@ -30,8 +31,9 @@ const ( // TestIgnoreComm checks that the 'sshd' command is ignored and the 'node' command is not func TestIgnoreComm(t *testing.T) { serverDir := buildFakeServer(t) - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) @@ -50,16 +52,21 @@ func TestIgnoreComm(t *testing.T) { goodPid := goodCmd.Process.Pid badPid := badCmd.Process.Pid + seen := make(map[int]model.Service) require.EventuallyWithT(t, func(collect *assert.CollectT) { - svcMap := getServicesMap(collect, url) - assert.Contains(collect, svcMap, goodPid) - assert.NotContains(collect, svcMap, badPid) + resp := getServices(collect, url) + for _, s := range 
resp.StartedServices { + seen[s.PID] = s + } + + assert.Contains(collect, seen, goodPid) + assert.NotContains(collect, seen, badPid) }, 30*time.Second, 100*time.Millisecond) } // TestIgnoreCommsLengths checks that the map contains names no longer than 15 bytes. func TestIgnoreCommsLengths(t *testing.T) { - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) require.Equal(t, len(discovery.config.ignoreComms), 10) @@ -115,7 +122,7 @@ func TestShouldIgnoreComm(t *testing.T) { serverBin := buildTestBin(t) serverDir := filepath.Dir(serverBin) - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) require.NotEmpty(t, discovery.config.ignoreComms) require.Equal(t, len(discovery.config.ignoreComms), 10) @@ -202,7 +209,7 @@ func BenchmarkProcName(b *testing.B) { // BenchmarkShouldIgnoreComm benchmarks reading of command name from /proc//comm. func BenchmarkShouldIgnoreComm(b *testing.B) { - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) cmd := startProcessLongComm(b) b.ResetTimer() diff --git a/pkg/collector/corechecks/servicediscovery/module/config_test.go b/pkg/collector/corechecks/servicediscovery/module/config_test.go index eddf27b1c3253c..2c41d563db7d90 100644 --- a/pkg/collector/corechecks/servicediscovery/module/config_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/config_test.go @@ -57,7 +57,7 @@ func TestConfigIgnoredComms(t *testing.T) { commsStr := strings.Join(test.comms, " ") // intentionally multiple spaces for sensitivity testing mockSystemProbe.SetWithoutSource("discovery.ignored_command_names", commsStr) - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) require.Equal(t, len(discovery.config.ignoreComms), len(test.comms)) @@ -74,7 +74,7 @@ func TestConfigIgnoredComms(t *testing.T) { t.Run("check default config length", func(t *testing.T) { mock.NewSystemProbe(t) - discovery := 
newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) assert.Equal(t, len(discovery.config.ignoreComms), 10) @@ -84,7 +84,7 @@ func TestConfigIgnoredComms(t *testing.T) { mock.NewSystemProbe(t) t.Setenv("DD_DISCOVERY_IGNORED_COMMAND_NAMES", "dummy1 dummy2") - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) _, found := discovery.config.ignoreComms["dummy1"] @@ -120,7 +120,7 @@ func TestConfigIgnoredServices(t *testing.T) { servicesStr := strings.Join(test.services, " ") // intentionally multiple spaces for sensitivity testing mockSystemProbe.SetWithoutSource("discovery.ignored_services", servicesStr) - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) require.Equal(t, len(discovery.config.ignoreServices), len(test.services)) @@ -134,7 +134,7 @@ func TestConfigIgnoredServices(t *testing.T) { t.Run("check default number of services", func(t *testing.T) { mock.NewSystemProbe(t) - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) assert.Equal(t, len(discovery.config.ignoreServices), 6) @@ -144,7 +144,7 @@ func TestConfigIgnoredServices(t *testing.T) { mock.NewSystemProbe(t) t.Setenv("DD_DISCOVERY_IGNORED_SERVICES", "service1 service2") - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) require.NotEmpty(t, discovery) _, found := discovery.config.ignoreServices["service1"] diff --git a/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go b/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go index 317455f84fbbee..022d0b8be722fb 100644 --- a/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go @@ -66,7 +66,7 @@ func TestShouldIgnorePid(t *testing.T) { _ = cmd.Process.Kill() }) - discovery := newDiscovery(nil) + discovery := newDiscovery(nil, nil) 
require.NotEmpty(t, discovery) require.EventuallyWithT(t, func(collect *assert.CollectT) { diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index ac77a9fc2e8c97..ccb3dd0b58e302 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -30,6 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/apm" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/servicetype" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" "github.com/DataDog/datadog-agent/pkg/network" @@ -42,10 +43,11 @@ import ( const ( pathServices = "/services" + heartbeatTime = 15 * time.Minute // Use a low cache validity to ensure that we refresh information every time // the check is run if needed. This is the same as cacheValidityNoRT in // pkg/process/checks/container.go. - containerCacheValidatity = 2 * time.Second + containerCacheValidity = 2 * time.Second ) // Ensure discovery implements the module.Module interface. @@ -54,18 +56,77 @@ var _ module.Module = &discovery{} // serviceInfo holds process data that should be cached between calls to the // endpoint. 
type serviceInfo struct { - generatedName string - generatedNameSource string - containerServiceName string - ddServiceName string - ddServiceInjected bool - checkedContainerData bool - language language.Language - apmInstrumentation apm.Instrumentation - cmdLine []string - startTimeMilli uint64 - cpuTime uint64 - cpuUsage float64 + name string + generatedName string + generatedNameSource string + containerServiceName string + containerServiceNameSource string + ddServiceName string + ddServiceInjected bool + ports []uint16 + checkedContainerData bool + language language.Language + apmInstrumentation apm.Instrumentation + cmdLine []string + startTimeMilli uint64 + rss uint64 + cpuTime uint64 + cpuUsage float64 + containerID string + lastHeartbeat int64 +} + +// toModelService fills the model.Service struct pointed to by out, using the +// service info to do it. +func (i *serviceInfo) toModelService(pid int32, out *model.Service) *model.Service { + if i == nil { + log.Warn("toModelService called with nil pointer") + return nil + } + + out.PID = int(pid) + out.Name = i.name + out.GeneratedName = i.generatedName + out.GeneratedNameSource = i.generatedNameSource + out.ContainerServiceName = i.containerServiceName + out.ContainerServiceNameSource = i.containerServiceNameSource + out.DDService = i.ddServiceName + out.DDServiceInjected = i.ddServiceInjected + out.Ports = i.ports + out.APMInstrumentation = string(i.apmInstrumentation) + out.Language = string(i.language) + out.Type = string(servicetype.Detect(i.ports)) + out.RSS = i.rss + out.CommandLine = i.cmdLine + out.StartTimeMilli = i.startTimeMilli + out.CPUCores = i.cpuUsage + out.ContainerID = i.containerID + out.LastHeartbeat = i.lastHeartbeat + + return out +} + +type timeProvider interface { + Now() time.Time +} + +type realTime struct{} + +func (realTime) Now() time.Time { return time.Now() } + +type pidSet map[int32]struct{} + +func (s pidSet) has(pid int32) bool { + _, present := s[pid] + return present +} + 
+func (s pidSet) add(pid int32) { + s[pid] = struct{}{} +} + +func (s pidSet) remove(pid int32) { + delete(s, pid) } // discovery is an implementation of the Module interface for the discovery module. @@ -73,11 +134,20 @@ type discovery struct { config *discoveryConfig mux *sync.RWMutex + // cache maps pids to data that should be cached between calls to the endpoint. cache map[int32]*serviceInfo - // ignorePids processes to be excluded from discovery - ignorePids map[int32]struct{} + // potentialServices stores processes that we have seen once in the previous + // iteration, but not yet confirmed to be a running service. + potentialServices pidSet + + // runningServices stores services that we have previously confirmed as + // running. + runningServices pidSet + + // ignorePids stores processes to be excluded from discovery + ignorePids pidSet // privilegedDetector is used to detect the language of a process. privilegedDetector privileged.LanguageDetector @@ -93,24 +163,28 @@ type discovery struct { lastCPUTimeUpdate time.Time containerProvider proccontainers.ContainerProvider + timeProvider timeProvider } -func newDiscovery(containerProvider proccontainers.ContainerProvider) *discovery { +func newDiscovery(containerProvider proccontainers.ContainerProvider, tp timeProvider) *discovery { return &discovery{ config: newConfig(), mux: &sync.RWMutex{}, cache: make(map[int32]*serviceInfo), - ignorePids: make(map[int32]struct{}), + potentialServices: make(pidSet), + runningServices: make(pidSet), + ignorePids: make(pidSet), privilegedDetector: privileged.NewLanguageDetector(), scrubber: procutil.NewDefaultDataScrubber(), containerProvider: containerProvider, + timeProvider: tp, } } // NewDiscoveryModule creates a new discovery system probe module. 
func NewDiscoveryModule(_ *sysconfigtypes.Config, deps module.FactoryDependencies) (module.Module, error) { sharedContainerProvider := proccontainers.InitSharedContainerProvider(deps.WMeta, deps.Tagger) - return newDiscovery(sharedContainerProvider), nil + return newDiscovery(sharedContainerProvider, realTime{}), nil } // GetStats returns the stats of the discovery module. @@ -121,6 +195,7 @@ func (s *discovery) GetStats() map[string]interface{} { // Register registers the discovery module with the provided HTTP mux. func (s *discovery) Register(httpMux *module.Router) error { httpMux.HandleFunc("/status", s.handleStatusEndpoint) + httpMux.HandleFunc("/debug", s.handleDebugEndpoint) httpMux.HandleFunc(pathServices, utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, s.handleServices)) return nil } @@ -139,6 +214,46 @@ func (s *discovery) handleStatusEndpoint(w http.ResponseWriter, _ *http.Request) _, _ = w.Write([]byte("Discovery Module is running")) } +func (s *discovery) handleDebugEndpoint(w http.ResponseWriter, _ *http.Request) { + services := make([]model.Service, 0) + + procRoot := kernel.ProcFSRoot() + pids, err := process.Pids() + if err != nil { + utils.WriteAsJSON(w, "could not get PIDs") + return + } + + context := parsingContext{ + procRoot: procRoot, + netNsInfo: make(map[uint32]*namespaceInfo), + } + + containers, _, pidToCid, err := s.containerProvider.GetContainers(containerCacheValidity, nil) + if err != nil { + log.Errorf("could not get containers: %s", err) + } + + // Build mapping of Container ID to container object to avoid traversal of + // the containers slice for every services. 
+ containersMap := make(map[string]*agentPayload.Container, len(containers)) + for _, c := range containers { + containersMap[c.Id] = c + } + + for _, pid := range pids { + service := s.getService(context, pid) + if service == nil { + continue + } + s.enrichContainerData(service, containersMap, pidToCid) + + services = append(services, *service) + } + + utils.WriteAsJSON(w, services) +} + // handleServers is the handler for the /services endpoint. // Returns the list of currently running services. func (s *discovery) handleServices(w http.ResponseWriter, _ *http.Request) { @@ -149,10 +264,7 @@ func (s *discovery) handleServices(w http.ResponseWriter, _ *http.Request) { return } - resp := &model.ServicesResponse{ - Services: *services, - } - utils.WriteAsJSON(w, resp) + utils.WriteAsJSON(w, services) } const prefix = "socket:[" @@ -374,19 +486,6 @@ func (s *discovery) shouldIgnoreService(name string) bool { return found } -// cleanIgnoredPids removes dead PIDs from the list of ignored processes. -func (s *discovery) cleanIgnoredPids(alivePids map[int32]struct{}) { - s.mux.Lock() - defer s.mux.Unlock() - - for pid := range s.ignorePids { - if _, alive := alivePids[pid]; alive { - continue - } - delete(s.ignorePids, pid) - } -} - // getServiceInfo gets the service information for a process using the // servicedetector module. 
func (s *discovery) getServiceInfo(pid int32) (*serviceInfo, error) { @@ -430,7 +529,13 @@ func (s *discovery) getServiceInfo(pid int32) (*serviceInfo, error) { nameMeta := servicediscovery.GetServiceName(lang, ctx) apmInstrumentation := apm.Detect(lang, ctx) + name := nameMeta.DDService + if name == "" { + name = nameMeta.Name + } + return &serviceInfo{ + name: name, generatedName: nameMeta.Name, generatedNameSource: string(nameMeta.Source), ddServiceName: nameMeta.DDService, @@ -533,40 +638,26 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service s.mux.Unlock() } - name := info.ddServiceName - if name == "" { - name = info.generatedName - } - if s.shouldIgnoreService(name) { + if s.shouldIgnoreService(info.name) { s.addIgnoredPid(pid) return nil } - return &model.Service{ - PID: int(pid), - Name: name, - GeneratedName: info.generatedName, - GeneratedNameSource: info.generatedNameSource, - DDService: info.ddServiceName, - DDServiceInjected: info.ddServiceInjected, - Ports: ports, - APMInstrumentation: string(info.apmInstrumentation), - Language: string(info.language), - RSS: rss, - CommandLine: info.cmdLine, - StartTimeMilli: info.startTimeMilli, - CPUCores: info.cpuUsage, - } + service := &model.Service{} + info.toModelService(pid, service) + service.Ports = ports + service.RSS = rss + + return service } // cleanCache deletes dead PIDs from the cache. Note that this does not actually // shrink the map but should free memory for the service name strings referenced -// from it. -func (s *discovery) cleanCache(alivePids map[int32]struct{}) { - s.mux.Lock() - defer s.mux.Unlock() +// from it. This function is not thread-safe and it is up to the caller to ensure +// s.mux is locked. 
+func (s *discovery) cleanCache(alivePids pidSet) { for pid := range s.cache { - if _, alive := alivePids[pid]; alive { + if alivePids.has(pid) { continue } @@ -574,12 +665,24 @@ func (s *discovery) cleanCache(alivePids map[int32]struct{}) { } } -// updateServicesCPUStats updates the CPU stats of cached services, as well as the -// global CPU time cache for future updates. -func (s *discovery) updateServicesCPUStats(services []model.Service) error { - s.mux.Lock() - defer s.mux.Unlock() +// cleanPidSets deletes dead PIDs from the provided pidSets. This function is not +// thread-safe and it is up to the caller to ensure s.mux is locked. +func (s *discovery) cleanPidSets(alivePids pidSet, sets ...pidSet) { + for _, set := range sets { + for pid := range set { + if alivePids.has(pid) { + continue + } + + delete(set, pid) + } + } +} +// updateServicesCPUStats updates the CPU stats of cached services, as well as the +// global CPU time cache for future updates. This function is not thread-safe and +// it is up to the caller to ensure s.mux is locked. 
+func (s *discovery) updateServicesCPUStats(response *model.ServicesResponse) error { if time.Since(s.lastCPUTimeUpdate) < s.config.cpuUsageUpdateDelay { return nil } @@ -589,17 +692,25 @@ func (s *discovery) updateServicesCPUStats(services []model.Service) error { return fmt.Errorf("could not get global CPU time: %w", err) } - for i := range services { - service := &services[i] - serviceInfo, ok := s.cache[int32(service.PID)] - if !ok { - continue - } + for pid, info := range s.cache { + _ = updateCPUCoresStats(int(pid), info, s.lastGlobalCPUTime, globalCPUTime) + } + + updateResponseCPUStats := func(services []model.Service) { + for i := range services { + service := &services[i] + info, ok := s.cache[int32(service.PID)] + if !ok { + continue + } - _ = updateCPUCoresStats(service.PID, serviceInfo, s.lastGlobalCPUTime, globalCPUTime) - service.CPUCores = serviceInfo.cpuUsage + service.CPUCores = info.cpuUsage + } } + updateResponseCPUStats(response.StartedServices) + updateResponseCPUStats(response.HeartbeatServices) + s.lastGlobalCPUTime = globalCPUTime s.lastCPUTimeUpdate = time.Now() @@ -680,13 +791,63 @@ func (s *discovery) enrichContainerData(service *model.Service, containers map[s serviceInfo, ok := s.cache[int32(service.PID)] if ok { serviceInfo.containerServiceName = serviceName + serviceInfo.containerServiceNameSource = tagName serviceInfo.checkedContainerData = true + serviceInfo.containerID = id } s.mux.Unlock() } +func (s *discovery) updateCacheInfo(response *model.ServicesResponse, now time.Time) { + updateCachedHeartbeat := func(service *model.Service) { + info, ok := s.cache[int32(service.PID)] + if !ok { + log.Warnf("could not access service info from the cache when update last heartbeat for PID %v start event", service.PID) + return + } + + info.lastHeartbeat = now.Unix() + info.ports = service.Ports + info.rss = service.RSS + } + + for i := range response.StartedServices { + service := &response.StartedServices[i] + 
updateCachedHeartbeat(service) + } + + for i := range response.HeartbeatServices { + service := &response.HeartbeatServices[i] + updateCachedHeartbeat(service) + } +} + +// handleStoppedServices verifies services previously seen and registered as +// running are still alive. If not, it will use the latest cached information +// about them to generate a stop event for the service. This function is not +// thread-safe and it is up to the caller to ensure s.mux is locked. +func (s *discovery) handleStoppedServices(response *model.ServicesResponse, alivePids pidSet) { + for pid := range s.runningServices { + if alivePids.has(pid) { + continue + } + + s.runningServices.remove(pid) + info, ok := s.cache[pid] + if !ok { + log.Warnf("could not get service from the cache to generate a stopped service event for PID %v", pid) + continue + } + delete(s.cache, pid) + + // Build service struct in place in the slice + response.StoppedServices = append(response.StoppedServices, model.Service{}) + info.toModelService(pid, &response.StoppedServices[len(response.StoppedServices)-1]) + } +} + // getStatus returns the list of currently running services. 
-func (s *discovery) getServices() (*[]model.Service, error) { +func (s *discovery) getServices() (*model.ServicesResponse, error) { procRoot := kernel.ProcFSRoot() pids, err := process.Pids() if err != nil { @@ -698,9 +859,14 @@ func (s *discovery) getServices() (*[]model.Service, error) { netNsInfo: make(map[uint32]*namespaceInfo), } - var services []model.Service - alivePids := make(map[int32]struct{}, len(pids)) - containers, _, pidToCid, err := s.containerProvider.GetContainers(containerCacheValidatity, nil) + response := &model.ServicesResponse{ + StartedServices: make([]model.Service, 0, len(s.potentialServices)), + StoppedServices: make([]model.Service, 0), + HeartbeatServices: make([]model.Service, 0), + } + + alivePids := make(pidSet, len(pids)) + containers, _, pidToCid, err := s.containerProvider.GetContainers(containerCacheValidity, nil) if err != nil { log.Errorf("could not get containers: %s", err) } @@ -712,8 +878,10 @@ func (s *discovery) getServices() (*[]model.Service, error) { containersMap[c.Id] = c } + now := s.timeProvider.Now() + for _, pid := range pids { - alivePids[pid] = struct{}{} + alivePids.add(pid) service := s.getService(context, pid) if service == nil { @@ -721,15 +889,44 @@ func (s *discovery) getServices() (*[]model.Service, error) { } s.enrichContainerData(service, containersMap, pidToCid) - services = append(services, *service) + if _, ok := s.runningServices[pid]; ok { + if serviceHeartbeatTime := time.Unix(service.LastHeartbeat, 0); now.Sub(serviceHeartbeatTime).Truncate(time.Minute) >= heartbeatTime { + service.LastHeartbeat = now.Unix() + response.HeartbeatServices = append(response.HeartbeatServices, *service) + } + + continue + } + + if _, ok := s.potentialServices[pid]; ok { + // We have seen it first in the previous call of getServices, so it + // is confirmed to be running. 
+ s.runningServices.add(pid) + delete(s.potentialServices, pid) + service.LastHeartbeat = now.Unix() + response.StartedServices = append(response.StartedServices, *service) + continue + } + + // This is a new potential service + s.potentialServices.add(pid) + log.Debugf("[pid: %d] adding process to potential: %s", pid, service.Name) } + s.mux.Lock() + defer s.mux.Unlock() + + s.updateCacheInfo(response, now) + s.handleStoppedServices(response, alivePids) + s.cleanCache(alivePids) - s.cleanIgnoredPids(alivePids) + s.cleanPidSets(alivePids, s.ignorePids, s.potentialServices) - if err = s.updateServicesCPUStats(services); err != nil { + if err = s.updateServicesCPUStats(response); err != nil { log.Warnf("updating services CPU stats: %s", err) } - return &services, nil + response.RunningServicesCount = len(s.runningServices) + + return response, nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 9cbfaae0fc9a95..bc47b069ab9700 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -44,6 +44,7 @@ import ( taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/apm" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" @@ -60,7 +61,19 @@ import ( dockerutils "github.com/DataDog/datadog-agent/pkg/util/testutil/docker" ) -func setupDiscoveryModule(t *testing.T) (string, *proccontainersmocks.MockContainerProvider) { +var mockedTime = 
time.Date(2024, 12, 1, 12, 12, 12, 2, time.UTC) + +func findService(pid int, services []model.Service) *model.Service { + for _, s := range services { + if s.PID == pid { + return &s + } + } + + return nil +} + +func setupDiscoveryModule(t *testing.T) (string, *proccontainersmocks.MockContainerProvider, *servicediscovery.Mocktimer) { t.Helper() wmeta := fxutil.Test[workloadmeta.Component](t, @@ -72,6 +85,8 @@ func setupDiscoveryModule(t *testing.T) (string, *proccontainersmocks.MockContai mockCtrl := gomock.NewController(t) mockContainerProvider := proccontainersmocks.NewMockContainerProvider(mockCtrl) + mTimeProvider := servicediscovery.NewMocktimer(mockCtrl) + mux := gorillamux.NewRouter() cfg := &types.Config{ Enabled: true, @@ -83,7 +98,7 @@ func setupDiscoveryModule(t *testing.T) (string, *proccontainersmocks.MockContai Name: config.DiscoveryModule, ConfigNamespaces: []string{"discovery"}, Fn: func(*types.Config, module.FactoryDependencies) (module.Module, error) { - module := newDiscovery(mockContainerProvider) + module := newDiscovery(mockContainerProvider, mTimeProvider) module.config.cpuUsageUpdateDelay = time.Second return module, nil @@ -97,10 +112,10 @@ func setupDiscoveryModule(t *testing.T) (string, *proccontainersmocks.MockContai srv := httptest.NewServer(mux) t.Cleanup(srv.Close) - return srv.URL, mockContainerProvider + return srv.URL, mockContainerProvider, mTimeProvider } -func getServices(t require.TestingT, url string) []model.Service { +func getServices(t require.TestingT, url string) *model.ServicesResponse { location := url + "/" + string(config.DiscoveryModule) + pathServices req, err := http.NewRequest(http.MethodGet, location, nil) require.NoError(t, err) @@ -112,19 +127,8 @@ func getServices(t require.TestingT, url string) []model.Service { res := &model.ServicesResponse{} err = json.NewDecoder(resp.Body).Decode(res) require.NoError(t, err) - require.NotEmpty(t, res) - - return res.Services -} - -func getServicesMap(t require.TestingT, 
url string) map[int]model.Service { - services := getServices(t, url) - servicesMap := make(map[int]model.Service) - for _, service := range services { - servicesMap[service.PID] = service - } - return servicesMap + return res } func startTCPServer(t *testing.T, proto string, address string) (*os.File, *net.TCPAddr) { @@ -199,14 +203,15 @@ func startProcessWithFile(t *testing.T, f *os.File) *exec.Cmd { // Check that we get (only) listening processes for all expected protocols. func TestBasic(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() var expectedPIDs []int var unexpectedPIDs []int expectedPorts := make(map[int]int) - var startTCP = func(proto string) { + startTCP := func(proto string) { f, server := startTCPServer(t, proto, "") cmd := startProcessWithFile(t, f) expectedPIDs = append(expectedPIDs, cmd.Process.Pid) @@ -217,7 +222,7 @@ func TestBasic(t *testing.T) { unexpectedPIDs = append(unexpectedPIDs, cmd.Process.Pid) } - var startUDP = func(proto string) { + startUDP := func(proto string) { f, server := startUDPServer(t, proto, ":8083") cmd := startProcessWithFile(t, f) expectedPIDs = append(expectedPIDs, cmd.Process.Pid) @@ -233,33 +238,36 @@ func TestBasic(t *testing.T) { startUDP("udp4") startUDP("udp6") + seen := make(map[int]model.Service) // Eventually to give the processes time to start require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(collect, url) + resp := getServices(collect, url) + for _, s := range resp.StartedServices { + seen[s.PID] = s + } + for _, pid := range expectedPIDs { - assert.Contains(collect, portMap, pid) + assert.Contains(collect, seen, pid) + 
require.Contains(collect, seen[pid].Ports, uint16(expectedPorts[pid])) + require.Equal(collect, seen[pid].LastHeartbeat, mockedTime.Unix()) + assertStat(collect, seen[pid]) } for _, pid := range unexpectedPIDs { - assert.NotContains(collect, portMap, pid) + assert.NotContains(collect, seen, pid) } }, 30*time.Second, 100*time.Millisecond) - - serviceMap := getServicesMap(t, url) - for _, pid := range expectedPIDs { - require.Contains(t, serviceMap[pid].Ports, uint16(expectedPorts[pid])) - assertStat(t, serviceMap[pid]) - } } // Check that we get all listening ports for a process func TestPorts(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() var expectedPorts []uint16 var unexpectedPorts []uint16 - var startTCP = func(proto string) { + startTCP := func(proto string) { serverf, server := startTCPServer(t, proto, "") t.Cleanup(func() { serverf.Close() }) clientf, client := startTCPClient(t, proto, server) @@ -269,7 +277,7 @@ func TestPorts(t *testing.T) { unexpectedPorts = append(unexpectedPorts, uint16(client.Port)) } - var startUDP = func(proto string) { + startUDP := func(proto string) { serverf, server := startUDPServer(t, proto, ":8083") t.Cleanup(func() { _ = serverf.Close() }) clientf, client := startUDPClient(t, proto, server) @@ -290,12 +298,16 @@ func TestPorts(t *testing.T) { expectedPortsMap := make(map[uint16]struct{}, len(expectedPorts)) - serviceMap := getServicesMap(t, url) pid := os.Getpid() - require.Contains(t, serviceMap, pid) + // Firt call will not return anything, as all services will be potentials. 
+ _ = getServices(t, url) + resp := getServices(t, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(t, startEvent, "could not find start event for pid %v", pid) + for _, port := range expectedPorts { expectedPortsMap[port] = struct{}{} - assert.Contains(t, serviceMap[pid].Ports, port) + assert.Contains(t, startEvent.Ports, port) } for _, port := range unexpectedPorts { // An unexpected port number can also be expected since UDP and TCP and @@ -305,17 +317,18 @@ func TestPorts(t *testing.T) { continue } - assert.NotContains(t, serviceMap[pid].Ports, port) + assert.NotContains(t, startEvent.Ports, port) } } func TestPortsLimits(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() var expectedPorts []int - var openPort = func(address string) { + openPort := func(address string) { serverf, server := startTCPServer(t, "tcp4", address) t.Cleanup(func() { serverf.Close() }) @@ -332,21 +345,26 @@ func TestPortsLimits(t *testing.T) { slices.Sort(expectedPorts) - serviceMap := getServicesMap(t, url) pid := os.Getpid() - require.Contains(t, serviceMap, pid) - ports := serviceMap[pid].Ports - assert.Contains(t, ports, uint16(8081)) - assert.Contains(t, ports, uint16(8082)) - assert.Len(t, ports, maxNumberOfPorts) + + // Firt call will not return anything, as all services will be potentials. 
+ _ = getServices(t, url) + resp := getServices(t, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(t, startEvent, "could not find start event for pid %v", pid) + + assert.Contains(t, startEvent.Ports, uint16(8081)) + assert.Contains(t, startEvent.Ports, uint16(8082)) + assert.Len(t, startEvent.Ports, maxNumberOfPorts) for i := 0; i < maxNumberOfPorts-2; i++ { - assert.Contains(t, ports, uint16(expectedPorts[i])) + assert.Contains(t, startEvent.Ports, uint16(expectedPorts[i])) } } func TestServiceName(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() listener, err := net.Listen("tcp", "") require.NoError(t, err) @@ -373,21 +391,125 @@ func TestServiceName(t *testing.T) { pid := cmd.Process.Pid // Eventually to give the processes time to start require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(collect, url) - assert.Contains(collect, portMap, pid) + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + // Non-ASCII character removed due to normalization. 
- assert.Equal(collect, "foo_bar", portMap[pid].DDService) - assert.Equal(collect, portMap[pid].DDService, portMap[pid].Name) - assert.Equal(collect, "sleep", portMap[pid].GeneratedName) - assert.Equal(collect, string(usm.CommandLine), portMap[pid].GeneratedNameSource) - assert.False(collect, portMap[pid].DDServiceInjected) - assert.Equal(collect, portMap[pid].ContainerID, "") + assert.Equal(collect, "foo_bar", startEvent.DDService) + assert.Equal(collect, startEvent.DDService, startEvent.Name) + assert.Equal(collect, "sleep", startEvent.GeneratedName) + assert.Equal(collect, string(usm.CommandLine), startEvent.GeneratedNameSource) + assert.False(collect, startEvent.DDServiceInjected) + assert.Equal(collect, startEvent.ContainerID, "") + assert.Equal(collect, startEvent.LastHeartbeat, mockedTime.Unix()) }, 30*time.Second, 100*time.Millisecond) } +func TestServiceLifetime(t *testing.T) { + startService := func() (*exec.Cmd, context.CancelFunc) { + listener, err := net.Listen("tcp", "") + require.NoError(t, err) + f, err := listener.(*net.TCPListener).File() + listener.Close() + + // Disable close-on-exec so that the sleep gets it + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + disableCloseOnExec(t, f) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(func() { cancel() }) + + cmd := exec.CommandContext(ctx, "sleep", "1000") + cmd.Dir = "/tmp/" + cmd.Env = append(cmd.Env, "DD_SERVICE=foo_bar") + err = cmd.Start() + require.NoError(t, err) + f.Close() + + return cmd, cancel + } + + checkService := func(t assert.TestingT, service *model.Service, expectedTime time.Time) { + // Non-ASCII character removed due to normalization. 
+ assert.Equal(t, "foo_bar", service.DDService) + assert.Equal(t, service.DDService, service.Name) + assert.Equal(t, "sleep", service.GeneratedName) + assert.Equal(t, string(usm.CommandLine), service.GeneratedNameSource) + assert.False(t, service.DDServiceInjected) + assert.Equal(t, service.ContainerID, "") + assert.Equal(t, service.LastHeartbeat, expectedTime.Unix()) + } + + stopService := func(cmd *exec.Cmd, cancel context.CancelFunc) { + cancel() + _ = cmd.Wait() + } + + t.Run("stop", func(t *testing.T) { + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() + + // Start the service and check we found it. + cmd, cancel := startService() + pid := cmd.Process.Pid + require.EventuallyWithT(t, func(collect *assert.CollectT) { + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + checkService(collect, startEvent, mockedTime) + }, 30*time.Second, 100*time.Millisecond) + + // Stop the service, and look for the stop event. 
+ stopService(cmd, cancel) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + resp := getServices(collect, url) + stopEvent := findService(pid, resp.StoppedServices) + t.Logf("stopped service: %+v", resp.StoppedServices) + require.NotNilf(collect, stopEvent, "could not find stop event for pid %v", pid) + checkService(collect, stopEvent, mockedTime) + }, 30*time.Second, 100*time.Millisecond) + }) + + t.Run("heartbeat", func(t *testing.T) { + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + + startEventSeen := false + + mTimeProvider.EXPECT().Now().DoAndReturn(func() time.Time { + if !startEventSeen { + return mockedTime + } + + return mockedTime.Add(heartbeatTime) + }).AnyTimes() + + cmd, cancel := startService() + t.Cleanup(cancel) + + pid := cmd.Process.Pid + require.EventuallyWithT(t, func(collect *assert.CollectT) { + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + checkService(collect, startEvent, mockedTime) + }, 30*time.Second, 100*time.Millisecond) + + startEventSeen = true + resp := getServices(t, url) + heartbeatEvent := findService(pid, resp.HeartbeatServices) + require.NotNilf(t, heartbeatEvent, "could not find hearteat event for pid %v", pid) + checkService(t, heartbeatEvent, mockedTime.Add(heartbeatTime)) + }) +} + func TestInjectedServiceName(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() createEnvsMemfd(t, []string{ "OTHER_ENV=test", @@ -401,21 +523,27 @@ func 
TestInjectedServiceName(t *testing.T) { t.Cleanup(func() { listener.Close() }) pid := os.Getpid() - portMap := getServicesMap(t, url) - require.Contains(t, portMap, pid) - require.Equal(t, "injected-service-name", portMap[pid].DDService) - require.Equal(t, portMap[pid].DDService, portMap[pid].Name) + + // Firt call will not return anything, as all services will be potentials. + _ = getServices(t, url) + resp := getServices(t, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(t, startEvent, "could not find start event for pid %v", pid) + + require.Equal(t, "injected-service-name", startEvent.DDService) + require.Equal(t, startEvent.DDService, startEvent.Name) // The GeneratedName can vary depending on how the tests are run, so don't // assert for a specific value. - require.NotEmpty(t, portMap[pid].GeneratedName) - require.NotEmpty(t, portMap[pid].GeneratedNameSource) - require.NotEqual(t, portMap[pid].DDService, portMap[pid].GeneratedName) - assert.True(t, portMap[pid].DDServiceInjected) + require.NotEmpty(t, startEvent.GeneratedName) + require.NotEmpty(t, startEvent.GeneratedNameSource) + require.NotEqual(t, startEvent.DDService, startEvent.GeneratedName) + assert.True(t, startEvent.DDServiceInjected) } func TestAPMInstrumentationInjected(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() createEnvsMemfd(t, []string{ "DD_INJECTION_ENABLED=service_name,tracer", @@ -426,9 +554,14 @@ func TestAPMInstrumentationInjected(t *testing.T) { t.Cleanup(func() { listener.Close() }) pid := os.Getpid() - portMap := getServicesMap(t, url) - require.Contains(t, portMap, pid) - require.Equal(t, string(apm.Injected), 
portMap[pid].APMInstrumentation) + + // Firt call will not return anything, as all services will be potentials. + _ = getServices(t, url) + resp := getServices(t, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(t, startEvent, "could not find start event for pid %v", pid) + + require.Equal(t, string(apm.Injected), startEvent.APMInstrumentation) } func makeAlias(t *testing.T, alias string, serverBin string) string { @@ -479,7 +612,7 @@ func TestPythonFromBashScript(t *testing.T) { func testCaptureWrappedCommands(t *testing.T, script string, commandWrapper []string, validator func(service model.Service) bool) { // Changing permissions - require.NoError(t, os.Chmod(script, 0755)) + require.NoError(t, os.Chmod(script, 0o755)) commandLineArgs := append(commandWrapper, script) cmd := exec.Command(commandLineArgs[0], commandLineArgs[1:]...) @@ -511,13 +644,16 @@ func testCaptureWrappedCommands(t *testing.T, script string, commandWrapper []st } t.Cleanup(func() { _ = proc.Kill() }) - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() + pid := int(proc.Pid) require.EventuallyWithT(t, func(collect *assert.CollectT) { - svcMap := getServicesMap(collect, url) - assert.Contains(collect, svcMap, pid) - assert.True(collect, validator(svcMap[pid])) + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + assert.True(collect, validator(*startEvent)) }, 30*time.Second, 100*time.Millisecond) } @@ -552,8 +688,9 @@ func TestAPMInstrumentationProvided(t *testing.T) { } serverDir := buildFakeServer(t) - url, mockContainerProvider := 
setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() for name, test := range testCases { t.Run(name, func(t *testing.T) { @@ -568,13 +705,21 @@ func TestAPMInstrumentationProvided(t *testing.T) { pid := cmd.Process.Pid + proc, err := process.NewProcess(int32(pid)) + require.NoError(t, err, "could not create gopsutil process handle") + require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(collect, url) - assert.Contains(collect, portMap, pid) - assert.Equal(collect, string(test.language), portMap[pid].Language) - assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) - assertStat(collect, portMap[pid]) - assertCPU(collect, url, pid) + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + + referenceValue, err := proc.Percent(0) + require.NoError(t, err, "could not get gopsutil cpu usage value") + + assert.Equal(collect, string(test.language), startEvent.Language) + assert.Equal(collect, string(apm.Provided), startEvent.APMInstrumentation) + assertStat(collect, *startEvent) + assert.InDelta(collect, referenceValue, startEvent.CPUCores*100, 10) }, 30*time.Second, 100*time.Millisecond) }) } @@ -615,27 +760,11 @@ func assertStat(t assert.TestingT, svc model.Service) { assert.InDelta(t, uint64(createTimeMs), svc.StartTimeMilli, 10000) } -func assertCPU(t require.TestingT, url string, pid int) { - proc, err := process.NewProcess(int32(pid)) - require.NoError(t, err, "could not create gopsutil process handle") - - // Compare CPU usage measurement over an interval. 
- _ = getServicesMap(t, url) - referenceValue, err := proc.Percent(1 * time.Second) - require.NoError(t, err, "could not get gopsutil cpu usage value") - - // Calling getServicesMap a second time us the CPU usage percentage since the last call, which should be close to gopsutil value. - portMap := getServicesMap(t, url) - assert.Contains(t, portMap, pid) - // gopsutil reports a percentage, while we are reporting a float between 0 and $(nproc), - // so we convert our value to a percentage. - assert.InDelta(t, referenceValue, portMap[pid].CPUCores*100, 10) -} - func TestCommandLineSanitization(t *testing.T) { serverDir := buildFakeServer(t) - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) @@ -652,9 +781,10 @@ func TestCommandLineSanitization(t *testing.T) { pid := cmd.Process.Pid require.EventuallyWithT(t, func(collect *assert.CollectT) { - svcMap := getServicesMap(collect, url) - assert.Contains(collect, svcMap, pid) - assert.Equal(collect, sanitizedCommandLine, svcMap[pid].CommandLine) + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + assert.Equal(collect, sanitizedCommandLine, startEvent.CommandLine) }, 30*time.Second, 100*time.Millisecond) } @@ -666,20 +796,31 @@ func TestNodeDocker(t *testing.T) { nodeJSPID, err := nodejs.GetNodeJSDockerPID() require.NoError(t, err) - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, 
mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() + pid := int(nodeJSPID) + proc, err := process.NewProcess(int32(pid)) + require.NoError(t, err, "could not create gopsutil process handle") + require.EventuallyWithT(t, func(collect *assert.CollectT) { - svcMap := getServicesMap(collect, url) - assert.Contains(collect, svcMap, pid) + resp := getServices(collect, url) + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + + referenceValue, err := proc.Percent(0) + require.NoError(collect, err, "could not get gopsutil cpu usage value") + // test@... changed to test_... due to normalization. - assert.Equal(collect, "test_nodejs-https-server", svcMap[pid].GeneratedName) - assert.Equal(collect, string(usm.Nodejs), svcMap[pid].GeneratedNameSource) - assert.Equal(collect, svcMap[pid].GeneratedName, svcMap[pid].Name) - assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation) - assertStat(collect, svcMap[pid]) - assertCPU(collect, url, pid) + assert.Equal(collect, "test_nodejs-https-server", startEvent.GeneratedName) + assert.Equal(collect, string(usm.Nodejs), startEvent.GeneratedNameSource) + assert.Equal(collect, startEvent.GeneratedName, startEvent.Name) + assert.Equal(collect, "provided", startEvent.APMInstrumentation) + assert.Equal(collect, "web_service", startEvent.Type) + assertStat(collect, *startEvent) + assert.InDelta(collect, referenceValue, startEvent.CPUCores*100, 10) }, 30*time.Second, 100*time.Millisecond) } @@ -725,16 +866,20 @@ func TestAPMInstrumentationProvidedWithMaps(t *testing.T) { cmd, err := fileopener.OpenFromProcess(t, fake, test.lib) require.NoError(t, err) - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, 
mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() pid := cmd.Process.Pid require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(collect, url) - assert.Contains(collect, portMap, pid) - assert.Equal(collect, string(test.language), portMap[pid].Language) - assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) - assertStat(collect, portMap[pid]) + resp := getServices(collect, url) + + // Start event assert + startEvent := findService(pid, resp.StartedServices) + require.NotNilf(collect, startEvent, "could not find start event for pid %v", pid) + assert.Equal(collect, string(test.language), startEvent.Language) + assert.Equal(collect, string(apm.Provided), startEvent.APMInstrumentation) + assertStat(collect, *startEvent) }, 30*time.Second, 100*time.Millisecond) }) } @@ -742,8 +887,9 @@ func TestAPMInstrumentationProvidedWithMaps(t *testing.T) { // Check that we can get listening processes in other namespaces. 
func TestNamespaces(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).AnyTimes() + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() // Needed when changing namespaces runtime.LockOSThread() @@ -787,23 +933,25 @@ func TestNamespaces(t *testing.T) { netns.Set(origNs) + seen := make(map[int]model.Service) // Eventually to give the processes time to start require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(collect, url) + resp := getServices(collect, url) + for _, s := range resp.StartedServices { + seen[s.PID] = s + } + for _, pid := range pids { - assert.Contains(collect, portMap, pid) + require.Contains(collect, seen, pid) + assert.Contains(collect, seen[pid].Ports, uint16(expectedPorts[pid])) } }, 30*time.Second, 100*time.Millisecond) - - serviceMap := getServicesMap(t, url) - for _, pid := range pids { - require.Contains(t, serviceMap[pid].Ports, uint16(expectedPorts[pid])) - } } // Check that we are able to find services inside Docker containers. func TestDocker(t *testing.T) { - url, mockContainerProvider := setupDiscoveryModule(t) + url, mockContainerProvider, mTimeProvider := setupDiscoveryModule(t) + mTimeProvider.EXPECT().Now().Return(mockedTime).AnyTimes() dir, _ := testutil.CurDir() scanner, err := globalutils.NewScanner(regexp.MustCompile("Serving.*"), globalutils.NoPattern) @@ -832,7 +980,7 @@ func TestDocker(t *testing.T) { pid1111 = process.PID mockContainerProvider. EXPECT(). - GetContainers(containerCacheValidatity, nil). + GetContainers(containerCacheValidity, nil). Return( []*agentPayload.Container{ {Id: "dummyCID", Tags: []string{ @@ -844,7 +992,8 @@ func TestDocker(t *testing.T) { nil, map[int]string{ pid1111: "dummyCID", - }, nil) + }, nil). 
+ AnyTimes() break } @@ -852,16 +1001,23 @@ func TestDocker(t *testing.T) { assert.NotZero(collect, pid1111) }, time.Second*10, time.Millisecond*20) - portMap := getServicesMap(t, url) - - require.Contains(t, portMap, pid1111) - require.Contains(t, portMap[pid1111].Ports, uint16(1234)) - require.Contains(t, portMap[pid1111].ContainerID, "dummyCID") - require.Contains(t, portMap[pid1111].Name, "http.server") - require.Contains(t, portMap[pid1111].GeneratedName, "http.server") - require.Contains(t, portMap[pid1111].GeneratedNameSource, string(usm.CommandLine)) - require.Contains(t, portMap[pid1111].ContainerServiceName, "foo_from_app_tag") - require.Contains(t, portMap[pid1111].ContainerServiceNameSource, "app") + // First endpoint call will not contain any events, because the service is + // still consider a potential service. The second call will have the events. + _ = getServices(t, url) + resp := getServices(t, url) + + // Assert events + startEvent := findService(pid1111, resp.StartedServices) + require.NotNilf(t, startEvent, "could not find start event for pid %v", pid1111) + require.Contains(t, startEvent.Ports, uint16(1234)) + require.Contains(t, startEvent.ContainerID, "dummyCID") + require.Contains(t, startEvent.Name, "http.server") + require.Contains(t, startEvent.GeneratedName, "http.server") + require.Contains(t, startEvent.GeneratedNameSource, string(usm.CommandLine)) + require.Contains(t, startEvent.ContainerServiceName, "foo_from_app_tag") + require.Contains(t, startEvent.ContainerServiceNameSource, "app") + require.Contains(t, startEvent.Type, "web_service") + require.Equal(t, startEvent.LastHeartbeat, mockedTime.Unix()) } // Check that the cache is cleaned when procceses die. 
@@ -870,8 +1026,8 @@ func TestCache(t *testing.T) { mockCtrl := gomock.NewController(t) mockContainerProvider := proccontainersmocks.NewMockContainerProvider(mockCtrl) - mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).MinTimes(1) - discovery := newDiscovery(mockContainerProvider) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidity, nil).MinTimes(1) + discovery := newDiscovery(mockContainerProvider, realTime{}) ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) @@ -1140,7 +1296,8 @@ func BenchmarkOldGetSockets(b *testing.B) { // addSockets adds only listening sockets to a map to be used for later looksups. func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, - family network.ConnectionFamily, ctype network.ConnectionType, state uint64) { + family network.ConnectionFamily, ctype network.ConnectionType, state uint64, +) { for _, sock := range sockets { if sock.St != state { continue diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index d00e0a0945e760..37d8e5b9a9a1d9 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -28,30 +28,10 @@ const ( CheckName = "service_discovery" refreshInterval = 1 * time.Minute - heartbeatTime = 15 * time.Minute ) -type serviceInfo struct { - meta ServiceMetadata - service model.Service - LastHeartbeat time.Time -} - -type serviceEvents struct { - start []serviceInfo - stop []serviceInfo - heartbeat []serviceInfo -} - -type discoveredServices struct { - potentials map[int]*serviceInfo - runningServices map[int]*serviceInfo - - events serviceEvents -} - type osImpl interface { - DiscoverServices() (*discoveredServices, error) + DiscoverServices() (*model.ServicesResponse, error) } var newOSImpl func() (osImpl, error) @@ -112,24 +92,21 @@ 
func (c *Check) Run() error { return nil } - disc, err := c.os.DiscoverServices() + response, err := c.os.DiscoverServices() if err != nil { return err } - log.Debugf("runningServices: %d | potentials: %d", - len(disc.runningServices), - len(disc.potentials), - ) - metricDiscoveredServices.Set(float64(len(disc.runningServices))) + log.Debugf("runningServices: %d", response.RunningServicesCount) + metricDiscoveredServices.Set(float64(response.RunningServicesCount)) - for _, p := range disc.events.start { + for _, p := range response.StartedServices { c.sender.sendStartServiceEvent(p) } - for _, p := range disc.events.heartbeat { + for _, p := range response.HeartbeatServices { c.sender.sendHeartbeatServiceEvent(p) } - for _, p := range disc.events.stop { + for _, p := range response.StoppedServices { c.sender.sendEndServiceEvent(p) } diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery_mock.go b/pkg/collector/corechecks/servicediscovery/servicediscovery_mock.go index 6591e5f3b21d1d..abb8dbc27bfece 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery_mock.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery_mock.go @@ -14,6 +14,8 @@ import ( time "time" gomock "github.com/golang/mock/gomock" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" ) // MockosImpl is a mock of osImpl interface. @@ -40,10 +42,10 @@ func (m *MockosImpl) EXPECT() *MockosImplMockRecorder { } // DiscoverServices mocks base method. 
-func (m *MockosImpl) DiscoverServices() (*discoveredServices, error) { +func (m *MockosImpl) DiscoverServices() (*model.ServicesResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DiscoverServices") - ret0, _ := ret[0].(*discoveredServices) + ret0, _ := ret[0].(*model.ServicesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go index f36c9e5d3a9ae3..9c56cf1f7aaf32 100644 --- a/test/new-e2e/tests/discovery/linux_test.go +++ b/test/new-e2e/tests/discovery/linux_test.go @@ -74,7 +74,7 @@ func (s *linuxTestSuite) TestServiceDiscoveryCheck() { // This is very useful for debugging, but we probably don't want to decode // and assert based on this in this E2E test since this is an internal // interface between the agent and system-probe. - services := s.Env().RemoteHost.MustExecute("sudo curl -s --unix /opt/datadog-agent/run/sysprobe.sock http://unix/discovery/services") + services := s.Env().RemoteHost.MustExecute("sudo curl -s --unix /opt/datadog-agent/run/sysprobe.sock http://unix/discovery/debug") t.Log("system-probe services", services) assert.EventuallyWithT(t, func(c *assert.CollectT) { From ff2c7a104754c4df4f2173cc7b56d582d2c9b396 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Wed, 29 Jan 2025 17:32:47 +0100 Subject: [PATCH 48/97] [CSPM] first try to resolve the hostname from the core agent (#33523) --- cmd/security-agent/subcommands/check/command.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/security-agent/subcommands/check/command.go b/cmd/security-agent/subcommands/check/command.go index 8074677ddfcde7..a1b3aeb3ebfe78 100644 --- a/cmd/security-agent/subcommands/check/command.go +++ b/cmd/security-agent/subcommands/check/command.go @@ -34,6 +34,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" "github.com/DataDog/datadog-agent/pkg/security/common" 
"github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" + "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" ) @@ -108,7 +109,13 @@ func commandsWrapped(bundleParamsFactory func() core.BundleParams) []*cobra.Comm // RunCheck runs a check func RunCheck(log log.Component, config config.Component, _ secrets.Component, statsdComp statsd.Component, checkArgs *CliParams, compression logscompression.Component) error { - hname, err := hostname.Get(context.TODO()) + var hname string + var err error + if flavor.GetFlavor() == flavor.ClusterAgent { + hname, err = hostname.Get(context.TODO()) + } else { + hname, err = hostnameutils.GetHostnameWithContextAndFallback(context.Background()) + } if err != nil { return err } From 3728d5a4abbb31f074fcce7953739502811991ae Mon Sep 17 00:00:00 2001 From: Marethyu <45374460+Pythyu@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:54:54 +0100 Subject: [PATCH 49/97] Generate buildenv bump PR on rc only (#33540) --- .gitlab/trigger_release/trigger_release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index d994f520651ce1..b6f294a8f51036 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -75,7 +75,7 @@ generate_windows_gitlab_runner_bump_pr: when: never - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+){0,1}$/ when: never - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$/ + - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ script: # We are using the agent platform auto PR github app to access the buildenv repository (already used for macOS builds) @@ -95,7 +95,7 @@ generate_windows_gitlab_runner_bump_pr_manual: when: never - if: $CI_COMMIT_TAG =~ 
/^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+){0,1}$/ when: never - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$/ + - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ when: manual script: # We are using the agent platform auto PR github app to access the buildenv repository (already used for macOS builds) From 3f106913960b7a0eadc378b3e1566fc3bf9f9e32 Mon Sep 17 00:00:00 2001 From: Pierre Gimalac Date: Wed, 29 Jan 2025 18:19:45 +0100 Subject: [PATCH 50/97] [ASCII-2666] Add agent-runtimes as reviewer on golang.org/x update PRs (#33473) --- .github/workflows/update_dependencies.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/update_dependencies.yml b/.github/workflows/update_dependencies.yml index e449cb894413ec..a5f0e66454af40 100644 --- a/.github/workflows/update_dependencies.yml +++ b/.github/workflows/update_dependencies.yml @@ -80,4 +80,5 @@ jobs: --base "$GITHUB_REF" \ --title "$PR_TITLE" \ --body-file "$TMP_PR_BODY_PATH" \ - --label "$PR_LABELS" + --label "$PR_LABELS" \ + --reviewer "DataDog/agent-runtimes" From 517df3f51a8434847da4810a62b28e40d7771ab5 Mon Sep 17 00:00:00 2001 From: Jeremy Hanna Date: Wed, 29 Jan 2025 11:23:14 -0600 Subject: [PATCH 51/97] [ASCII-2691] Change wording on FIPS Mode status indication (#33507) --- comp/core/status/statusimpl/common_header_provider.go | 10 +++++++++- .../status/statusimpl/common_header_provider_test.go | 4 ++++ comp/core/status/statusimpl/status_test.go | 7 +++++++ comp/core/status/statusimpl/templates/html.tmpl | 6 +++--- comp/core/status/statusimpl/templates/text.tmpl | 2 +- pkg/fips/fips_disabled.go | 2 +- pkg/fips/fips_nix.go | 9 ++++++--- pkg/fips/fips_windows.go | 9 ++++++--- .../agent-subcommands/status/status_common_test.go | 1 + .../tests/agent-subcommands/status/status_nix_test.go | 2 +- test/new-e2e/tests/fips-compliance/fips_nix_test.go | 5 ++--- 11 files changed, 41 insertions(+), 16 deletions(-) diff --git 
a/comp/core/status/statusimpl/common_header_provider.go b/comp/core/status/statusimpl/common_header_provider.go index 0a7d725e75a073..4c6a0643f0be3b 100644 --- a/comp/core/status/statusimpl/common_header_provider.go +++ b/comp/core/status/statusimpl/common_header_provider.go @@ -74,8 +74,8 @@ func (h *headerProvider) HTML(_ bool, buffer io.Writer) error { func (h *headerProvider) data() map[string]interface{} { data := maps.Clone(h.constdata) data["time_nano"] = nowFunc().UnixNano() - data["fips_status"] = fips.Status() data["config"] = populateConfig(h.config) + data["fips_status"] = populateFIPSStatus(h.config) return data } @@ -115,3 +115,11 @@ func populateConfig(config config.Component) map[string]string { return conf } + +func populateFIPSStatus(config config.Component) string { + fipsStatus := fips.Status() + if fipsStatus == "not available" && config.GetString("fips.enabled") == "true" { + return "proxy" + } + return fipsStatus +} diff --git a/comp/core/status/statusimpl/common_header_provider_test.go b/comp/core/status/statusimpl/common_header_provider_test.go index ebbce0eb17ac33..6cd7f471cd4d1e 100644 --- a/comp/core/status/statusimpl/common_header_provider_test.go +++ b/comp/core/status/statusimpl/common_header_provider_test.go @@ -89,6 +89,7 @@ func TestCommonHeaderProviderText(t *testing.T) { Python Version: n/a Build arch: %s Agent flavor: %s + FIPS Mode: not available Log Level: info Paths @@ -188,6 +189,7 @@ func TestCommonHeaderProviderTextWithFipsInformation(t *testing.T) { Python Version: n/a Build arch: %s Agent flavor: %s + FIPS Mode: proxy Log Level: info Paths @@ -241,6 +243,7 @@ func TestCommonHeaderProviderHTML(t *testing.T) { Flavor: %s
PID: %d
Agent start: 2018-01-05 11:25:15 UTC (1515151515000)
+ FIPS Mode: not available
Log Level: info
Config File: There is no config file
Conf.d Path: %s
@@ -304,6 +307,7 @@ func TestCommonHeaderProviderHTMLWithFipsInformation(t *testing.T) { Flavor: %s
PID: %d
Agent start: 2018-01-05 11:25:15 UTC (1515151515000)
+ FIPS Mode: proxy
Log Level: info
Config File: There is no config file
Conf.d Path: %s
diff --git a/comp/core/status/statusimpl/status_test.go b/comp/core/status/statusimpl/status_test.go index 319dc52a35e5fe..ac1009220e498a 100644 --- a/comp/core/status/statusimpl/status_test.go +++ b/comp/core/status/statusimpl/status_test.go @@ -272,6 +272,7 @@ func TestGetStatus(t *testing.T) { Python Version: n/a Build arch: %s Agent flavor: %s + FIPS Mode: not available Log Level: info Paths @@ -324,6 +325,7 @@ X Section Python Version: n/a Build arch: %s Agent flavor: %s + FIPS Mode: not available Log Level: info Paths @@ -374,6 +376,7 @@ X Section Flavor: %s
PID: %d
Agent start: 2018-01-05 11:25:15 UTC (1515151515000)
+ FIPS Mode: not available
Log Level: info
Config File: There is no config file
Conf.d Path: %s
@@ -428,6 +431,7 @@ X Section Flavor: %s
PID: %d
Agent start: 2018-01-05 11:25:15 UTC (1515151515000)
+ FIPS Mode: not available
Log Level: info
Config File: There is no config file
Conf.d Path: %s
@@ -515,6 +519,7 @@ func TestGetStatusDoNotRenderHeaderIfNoProviders(t *testing.T) { Python Version: n/a Build arch: %s Agent flavor: %s + FIPS Mode: not available Log Level: info Paths @@ -602,6 +607,7 @@ func TestGetStatusWithErrors(t *testing.T) { Python Version: n/a Build arch: %s Agent flavor: agent + FIPS Mode: not available Log Level: info Paths @@ -915,6 +921,7 @@ Status render errors Python Version: n/a Build arch: %s Agent flavor: agent + FIPS Mode: not available Log Level: info Paths diff --git a/comp/core/status/statusimpl/templates/html.tmpl b/comp/core/status/statusimpl/templates/html.tmpl index dc3efef7b9f233..0087ff9ebbc774 100644 --- a/comp/core/status/statusimpl/templates/html.tmpl +++ b/comp/core/status/statusimpl/templates/html.tmpl @@ -3,11 +3,11 @@ Version: {{.version}}
Flavor: {{.flavor}}
- {{- if .fips_status}} - FIPS compliant: {{.fips_status}}
- {{- end }} PID: {{.pid}}
Agent start: {{ formatUnixTime .agent_start_nano }}
+ {{- if .fips_status}} + FIPS Mode: {{.fips_status}}
+ {{- end }} {{- if .config.log_file}} Log File: {{.config.log_file}}
{{end}} diff --git a/comp/core/status/statusimpl/templates/text.tmpl b/comp/core/status/statusimpl/templates/text.tmpl index a86ad4069da38b..9210d4148f9138 100644 --- a/comp/core/status/statusimpl/templates/text.tmpl +++ b/comp/core/status/statusimpl/templates/text.tmpl @@ -8,7 +8,7 @@ Build arch: {{.build_arch}} Agent flavor: {{.flavor}} {{- if .fips_status}} - FIPS compliant: {{.fips_status}} + FIPS Mode: {{.fips_status}} {{- end }} {{- if .config.log_file}} Log File: {{.config.log_file}} diff --git a/pkg/fips/fips_disabled.go b/pkg/fips/fips_disabled.go index 0f4fdf6f79868e..7042a3cf3c8e13 100644 --- a/pkg/fips/fips_disabled.go +++ b/pkg/fips/fips_disabled.go @@ -10,7 +10,7 @@ package fips // Status returns an empty string when not the datadog-fips-agent flavor func Status() string { - return "" + return "not available" } // Enabled returns false when not the datadog-fips-agent flavor diff --git a/pkg/fips/fips_nix.go b/pkg/fips/fips_nix.go index d0a41c1626c7e6..5eda87b83cf1eb 100644 --- a/pkg/fips/fips_nix.go +++ b/pkg/fips/fips_nix.go @@ -10,13 +10,16 @@ package fips import ( "os" - "strconv" ) -// Status returns a displayable string or error of FIPS compliance state of the agent build and runtime +// Status returns a displayable string or error of FIPS Mode of the agent build and runtime func Status() string { enabled, _ := Enabled() - return strconv.FormatBool(enabled) + if enabled { + return "enabled" + } else { + return "disabled" + } } // Enabled checks to see if the agent runtime environment is as expected relating to its build to be FIPS compliant. For Linux this is that the binary is run with the GOFIPS=1 environment variable. 
diff --git a/pkg/fips/fips_windows.go b/pkg/fips/fips_windows.go index b13e2c4878e7d3..a5a0e7cb96b298 100644 --- a/pkg/fips/fips_windows.go +++ b/pkg/fips/fips_windows.go @@ -10,15 +10,18 @@ package fips import ( "fmt" - "strconv" "golang.org/x/sys/windows/registry" ) -// Status returns a displayable string or error of FIPS compliance state of the agent build and runtime +// Status returns a displayable string or error of FIPS Mode of the agent build and runtime func Status() string { enabled, _ := Enabled() - return strconv.FormatBool(enabled) + if enabled { + return "enabled" + } else { + return "disabled" + } } // Enabled checks to see if the agent runtime environment is as expected relating to its build to be FIPS compliant. For Windows this means that FIPS mode is enabled via the Windows registry. diff --git a/test/new-e2e/tests/agent-subcommands/status/status_common_test.go b/test/new-e2e/tests/agent-subcommands/status/status_common_test.go index 669e0ff7ec1636..92bb937f69d2a1 100644 --- a/test/new-e2e/tests/agent-subcommands/status/status_common_test.go +++ b/test/new-e2e/tests/agent-subcommands/status/status_common_test.go @@ -102,6 +102,7 @@ func (v *baseStatusSuite) TestDefaultInstallStatus() { { name: `Agent \(.*\)`, // TODO: verify that the right version is output shouldBePresent: true, + shouldContain: []string{"FIPS Mode: not available"}, shouldNotContain: []string{"FIPS proxy"}, }, { diff --git a/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go b/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go index 35841716f6463f..eaace3299438d1 100644 --- a/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go @@ -47,7 +47,7 @@ func (v *linuxStatusSuite) TestFIPSProxyStatus() { { name: `Agent \(.*\)`, shouldBePresent: true, - shouldContain: []string{"FIPS proxy"}, + shouldContain: []string{"FIPS Mode: proxy", "FIPS proxy"}, }, } diff --git 
a/test/new-e2e/tests/fips-compliance/fips_nix_test.go b/test/new-e2e/tests/fips-compliance/fips_nix_test.go index c70221dbc6dcf5..569a5eb591b34f 100644 --- a/test/new-e2e/tests/fips-compliance/fips_nix_test.go +++ b/test/new-e2e/tests/fips-compliance/fips_nix_test.go @@ -35,7 +35,6 @@ func TestLinuxFIPSComplianceSuite(t *testing.T) { awshost.WithEC2InstanceOptions(ec2.WithOS(os.UbuntuDefault)), awshost.WithAgentOptions(agentparams.WithFlavor("datadog-fips-agent")), )), - e2e.WithSkipDeleteOnFailure(), ) } @@ -43,7 +42,7 @@ func (v *LinuxFIPSComplianceSuite) TestFIPSDefaultConfig() { status := v.Env().RemoteHost.MustExecute("sudo GOFIPS=0 datadog-agent status") assert.NotContains(v.T(), status, "can't enable FIPS mode for OpenSSL") assert.Contains(v.T(), status, "Status date") - assert.Contains(v.T(), status, "FIPS compliant: false") + assert.Contains(v.T(), status, "FIPS Mode: disabled") v.Env().RemoteHost.MustExecute("sudo systemctl set-environment GOFIPS=1") v.Env().RemoteHost.MustExecute("sudo systemctl restart datadog-agent") @@ -52,7 +51,7 @@ func (v *LinuxFIPSComplianceSuite) TestFIPSDefaultConfig() { status = v.Env().RemoteHost.MustExecute("sudo GOFIPS=1 datadog-agent status") assert.NotContains(t, status, "can't enable FIPS mode for OpenSSL") assert.Contains(t, status, "Status date") - assert.Contains(t, status, "FIPS compliant: true") + assert.Contains(t, status, "FIPS Mode: enabled") }, 60*time.Second, 5*time.Second) v.Env().RemoteHost.MustExecute("sudo systemctl unset-environment GOFIPS") From 2174ab4d64c81c38654742f5324efb2ba36b0942 Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Wed, 29 Jan 2025 18:43:17 +0100 Subject: [PATCH 52/97] [CWS] Do not import model package from utils package (#33543) --- .../{utils => probe/procfs}/snapshot_bound_sockets.go | 11 ++++++----- pkg/security/resolvers/resolvers_ebpf.go | 3 ++- .../activity_tree/process_node_snapshot.go | 3 ++- 3 files changed, 10 insertions(+), 7 deletions(-) rename pkg/security/{utils => 
probe/procfs}/snapshot_bound_sockets.go (98%) diff --git a/pkg/security/utils/snapshot_bound_sockets.go b/pkg/security/probe/procfs/snapshot_bound_sockets.go similarity index 98% rename from pkg/security/utils/snapshot_bound_sockets.go rename to pkg/security/probe/procfs/snapshot_bound_sockets.go index 6b01dd5c731e92..565d7613e66719 100644 --- a/pkg/security/utils/snapshot_bound_sockets.go +++ b/pkg/security/probe/procfs/snapshot_bound_sockets.go @@ -5,8 +5,8 @@ //go:build linux -// Package utils holds utils related files -package utils +// Package procfs holds procfs related files +package procfs import ( "fmt" @@ -14,12 +14,13 @@ import ( "strconv" "strings" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "github.com/DataDog/datadog-agent/pkg/security/seclog" - "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/prometheus/procfs" "github.com/shirou/gopsutil/v4/process" "golang.org/x/sys/unix" + + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/seclog" + "github.com/DataDog/datadog-agent/pkg/util/kernel" ) // GetBoundSockets returns the list of bound sockets for a given process diff --git a/pkg/security/resolvers/resolvers_ebpf.go b/pkg/security/resolvers/resolvers_ebpf.go index 61c24551473348..77d0863dff19ca 100644 --- a/pkg/security/resolvers/resolvers_ebpf.go +++ b/pkg/security/resolvers/resolvers_ebpf.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/probe/erpc" "github.com/DataDog/datadog-agent/pkg/security/probe/managerhelper" + "github.com/DataDog/datadog-agent/pkg/security/probe/procfs" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" "github.com/DataDog/datadog-agent/pkg/security/resolvers/container" "github.com/DataDog/datadog-agent/pkg/security/resolvers/dentry" @@ -330,7 +331,7 @@ func (r *EBPFResolvers) snapshotBoundSockets() error { } for _, proc := range processes { - 
bs, err := utils.GetBoundSockets(proc) + bs, err := procfs.GetBoundSockets(proc) if err != nil { log.Debugf("sockets snapshot failed for (pid: %v): %s", proc.Pid, err) continue diff --git a/pkg/security/security_profile/activity_tree/process_node_snapshot.go b/pkg/security/security_profile/activity_tree/process_node_snapshot.go index 6e374e1af1c77a..393e64fa16a0d5 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot.go @@ -24,6 +24,7 @@ import ( "github.com/shirou/gopsutil/v4/process" "golang.org/x/sys/unix" + "github.com/DataDog/datadog-agent/pkg/security/probe/procfs" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -266,7 +267,7 @@ func extractPathFromSmapsLine(line []byte) (string, bool) { } func (pn *ProcessNode) snapshotBoundSockets(p *process.Process, stats *Stats, newEvent func() *model.Event) { - boundSockets, err := utils.GetBoundSockets(p) + boundSockets, err := procfs.GetBoundSockets(p) if err != nil { seclog.Warnf("error while listing sockets (pid: %v): %s", p.Pid, err) return From 9cc269ce5d92e4e417bc98b2f655018392c84362 Mon Sep 17 00:00:00 2001 From: Dan Lepage <140522866+dplepage-dd@users.noreply.github.com> Date: Wed, 29 Jan 2025 13:01:22 -0500 Subject: [PATCH 53/97] Reapply "[NDM] Cache in Devicecheck (#32373)" (#33366) (#33430) --- .../snmp/internal/checkconfig/buildprofile.go | 75 +++ .../internal/checkconfig/buildprofile_test.go | 226 ++++++++ .../snmp/internal/checkconfig/config.go | 165 +----- .../snmp/internal/checkconfig/config_oid.go | 50 -- .../internal/checkconfig/config_oid_test.go | 40 -- .../snmp/internal/checkconfig/config_test.go | 541 +++++------------- .../snmp/internal/devicecheck/devicecheck.go | 120 +++- .../internal/devicecheck/devicecheck_test.go | 28 +- 
.../snmp/internal/discovery/discovery_test.go | 7 + .../corechecks/snmp/internal/fetch/fetch.go | 13 +- .../snmp/internal/fetch/fetch_test.go | 54 +- .../internal/report/report_device_metadata.go | 32 +- .../report/report_device_metadata_test.go | 180 +++--- .../profiledefinition/profile_definition.go | 13 - 14 files changed, 688 insertions(+), 856 deletions(-) create mode 100644 pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go create mode 100644 pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile_test.go delete mode 100644 pkg/collector/corechecks/snmp/internal/checkconfig/config_oid.go delete mode 100644 pkg/collector/corechecks/snmp/internal/checkconfig/config_oid_test.go diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go b/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go new file mode 100644 index 00000000000000..7da0734e7d2403 --- /dev/null +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go @@ -0,0 +1,75 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package checkconfig + +import ( + "fmt" + "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" + "github.com/DataDog/datadog-agent/pkg/util/log" + "maps" + "slices" +) + +// BuildProfile builds the fetchable profile for this config. +// +// If ProfileName == ProfileNameInline, then the result just contains the inline +// metrics and tags from the initconfig. This is also true if ProfileName == +// ProfileNameAuto and sysObjectID == "" (this is useful when you want basic +// metadata for a device that you can't yet get the sysObjectID from). 
+// +// Otherwise, the result will be a copy of the profile from ProfileProvider that +// matches this config, either by sysObjectID if ProfileName == ProfileNameAuto +// or by ProfileName directly otherwise. +// +// The error will be non-nil if ProfileProvider doesn't know ProfileName, or if +// ProfileName is ProfileNameAuto and ProfileProvider finds no match for +// sysObjectID. In this case the returned profile will still be non-nil, and +// will be the same as what you'd get for an inline profile. +func (c *CheckConfig) BuildProfile(sysObjectID string) (profiledefinition.ProfileDefinition, error) { + var rootProfile *profiledefinition.ProfileDefinition + var profileErr error + + switch c.ProfileName { + case ProfileNameInline: // inline profile -> no parent + rootProfile = nil + case ProfileNameAuto: // determine based on sysObjectID + // empty sysObjectID happens when we need the profile but couldn't connect to the device. + if sysObjectID != "" { + if profileConfig, err := c.ProfileProvider.GetProfileForSysObjectID(sysObjectID); err != nil { + profileErr = fmt.Errorf("failed to get profile for sysObjectID %q: %v", sysObjectID, err) + } else { + rootProfile = &profileConfig.Definition + log.Debugf("detected profile %q for sysobjectid %q", rootProfile.Name, sysObjectID) + } + } + default: + if profile := c.ProfileProvider.GetProfile(c.ProfileName); profile == nil { + profileErr = fmt.Errorf("unknown profile %q", c.ProfileName) + } else { + rootProfile = &profile.Definition + } + } + + profile := *profiledefinition.NewProfileDefinition() + profile.Metrics = slices.Clone(c.RequestedMetrics) + profile.MetricTags = slices.Clone(c.RequestedMetricTags) + if rootProfile != nil { + profile.Name = rootProfile.Name + profile.Version = rootProfile.Version + profile.StaticTags = append(profile.StaticTags, "snmp_profile:"+rootProfile.Name) + vendor := rootProfile.Device.Vendor + if vendor != "" { + profile.StaticTags = append(profile.StaticTags, "device_vendor:"+vendor) + 
} + profile.StaticTags = append(profile.StaticTags, rootProfile.StaticTags...) + profile.Metadata = maps.Clone(rootProfile.Metadata) + profile.Metrics = append(profile.Metrics, rootProfile.Metrics...) + profile.MetricTags = append(profile.MetricTags, rootProfile.MetricTags...) + } + profile.Metadata = updateMetadataDefinitionWithDefaults(profile.Metadata, c.CollectTopology) + + return profile, profileErr +} diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile_test.go new file mode 100644 index 00000000000000..de8ada8875a9fa --- /dev/null +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile_test.go @@ -0,0 +1,226 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package checkconfig + +import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/profile" + "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestBuildProfile(t *testing.T) { + metrics := []profiledefinition.MetricsConfig{ + {Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}}, + { + Symbols: []profiledefinition.SymbolConfig{ + { + OID: "1.2.3.4.6", + Name: "abc", + }, + }, + MetricTags: profiledefinition.MetricTagConfigList{ + profiledefinition.MetricTagConfig{ + Symbol: profiledefinition.SymbolConfigCompat{ + OID: "1.2.3.4.7", + }, + }, + }, + }, + } + profile1 := profiledefinition.ProfileDefinition{ + Name: "profile1", + Version: 12, + Metrics: metrics, + MetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: 
"sysLocation"}}, + }, + Metadata: profiledefinition.MetadataConfig{ + "device": { + Fields: map[string]profiledefinition.MetadataField{ + "vendor": { + Value: "a-vendor", + }, + "description": { + Symbol: profiledefinition.SymbolConfig{ + OID: "1.3.6.1.2.1.1.99.3.0", + Name: "sysDescr", + }, + }, + "name": { + Symbols: []profiledefinition.SymbolConfig{ + { + OID: "1.3.6.1.2.1.1.99.1.0", + Name: "symbol1", + }, + { + OID: "1.3.6.1.2.1.1.99.2.0", + Name: "symbol2", + }, + }, + }, + }, + }, + "interface": { + Fields: map[string]profiledefinition.MetadataField{ + "oper_status": { + Symbol: profiledefinition.SymbolConfig{ + OID: "1.3.6.1.2.1.2.2.1.99", + Name: "someIfSymbol", + }, + }, + }, + IDTags: profiledefinition.MetricTagConfigList{ + { + Tag: "interface", + Symbol: profiledefinition.SymbolConfigCompat{ + OID: "1.3.6.1.2.1.31.1.1.1.1", + Name: "ifName", + }, + }, + }, + }, + }, + SysObjectIDs: profiledefinition.StringArray{"1.1.1.*"}, + } + + mergedMetadata := make(profiledefinition.MetadataConfig) + mergeMetadata(mergedMetadata, profile1.Metadata) + mergedMetadata["ip_addresses"] = LegacyMetadataConfig["ip_addresses"] + + mockProfiles := profile.StaticProvider(profile.ProfileConfigMap{ + "profile1": profile.ProfileConfig{ + Definition: profile1, + }, + }) + + type testCase struct { + name string + config *CheckConfig + sysObjectID string + expected profiledefinition.ProfileDefinition + expectedError string + } + for _, tc := range []testCase{ + { + name: "inline", + config: &CheckConfig{ + IPAddress: "1.2.3.4", + RequestedMetrics: metrics, + RequestedMetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: "sysLocation"}}, + }, + ProfileName: ProfileNameInline, + }, + expected: profiledefinition.ProfileDefinition{ + Metrics: metrics, + MetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", 
Name: "sysLocation"}}, + }, + Metadata: LegacyMetadataConfig, + }, + }, { + name: "static", + config: &CheckConfig{ + IPAddress: "1.2.3.4", + ProfileProvider: mockProfiles, + ProfileName: "profile1", + }, + expected: profiledefinition.ProfileDefinition{ + Name: "profile1", + Version: 12, + Metrics: metrics, + MetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: "sysLocation"}}, + }, + StaticTags: []string{"snmp_profile:profile1"}, + Metadata: mergedMetadata, + }, + }, { + name: "dynamic", + config: &CheckConfig{ + IPAddress: "1.2.3.4", + ProfileProvider: mockProfiles, + ProfileName: ProfileNameAuto, + }, + sysObjectID: "1.1.1.1", + expected: profiledefinition.ProfileDefinition{ + Name: "profile1", + Version: 12, + Metrics: metrics, + MetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: "sysLocation"}}, + }, + StaticTags: []string{"snmp_profile:profile1"}, + Metadata: mergedMetadata, + }, + }, { + name: "static with requested metrics", + config: &CheckConfig{ + IPAddress: "1.2.3.4", + ProfileProvider: mockProfiles, + CollectDeviceMetadata: true, + CollectTopology: false, + ProfileName: "profile1", + RequestedMetrics: []profiledefinition.MetricsConfig{ + {Symbol: profiledefinition.SymbolConfig{OID: "3.1", Name: "global-metric"}}}, + RequestedMetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "global-tag", Symbol: profiledefinition.SymbolConfigCompat{OID: "3.2", Name: "globalSymbol"}}, + }, + }, + expected: profiledefinition.ProfileDefinition{ + Name: "profile1", + Version: 12, + Metrics: append([]profiledefinition.MetricsConfig{ + {Symbol: profiledefinition.SymbolConfig{OID: "3.1", Name: "global-metric"}}}, + metrics...), + MetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "global-tag", Symbol: profiledefinition.SymbolConfigCompat{OID: "3.2", Name: "globalSymbol"}}, + 
{Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: "sysLocation"}}, + }, + Metadata: mergedMetadata, + StaticTags: []string{"snmp_profile:profile1"}, + }, + }, { + name: "static unknown", + config: &CheckConfig{ + IPAddress: "1.2.3.4", + ProfileProvider: mockProfiles, + ProfileName: "f5", + }, + expectedError: "unknown profile \"f5\"", + }, { + name: "dynamic unknown", + config: &CheckConfig{ + IPAddress: "1.2.3.4", + ProfileProvider: mockProfiles, + ProfileName: ProfileNameAuto, + }, + sysObjectID: "3.3.3.3", + expectedError: "failed to get profile for sysObjectID \"3.3.3.3\": no profiles found for sysObjectID \"3." + + "3.3.3\"", + }, + } { + t.Run(tc.name, func(t *testing.T) { + profile, err := tc.config.BuildProfile(tc.sysObjectID) + if tc.expectedError != "" { + assert.EqualError(t, err, tc.expectedError) + } else { + require.NoError(t, err) + if !assert.Equal(t, tc.expected, profile) { + for k, v := range tc.expected.Metadata["device"].Fields { + t.Log(k, v) + } + t.Log("===") + for k, v := range profile.Metadata["device"].Fields { + t.Log(k, v) + } + } + } + }) + } +} diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go index ab5cc66f49f974..8689e3b69ada15 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go @@ -8,7 +8,6 @@ package checkconfig import ( "context" - "encoding/json" "fmt" "hash/fnv" "net" @@ -72,6 +71,11 @@ const DefaultPingTimeout = 3 * time.Second // DefaultPingInterval is the default time to wait between sending ping packets const DefaultPingInterval = 10 * time.Millisecond +// config.ProfileName will be set to ProfileNameAuto if the profile is auto-detected, +// and ProfileNameInline if metrics were provided in the initial config (so that no profile is used) +const ProfileNameAuto = "" +const ProfileNameInline = "" + var 
uptimeMetricConfig = profiledefinition.MetricsConfig{Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}} // DeviceDigest is the digest of a minimal config used for autodiscovery @@ -164,20 +168,13 @@ type CheckConfig struct { PrivProtocol string PrivKey string ContextName string - OidConfig OidConfig // RequestedMetrics are the metrics explicitly requested by config. RequestedMetrics []profiledefinition.MetricsConfig // RequestedMetricTags are the tags explicitly requested by config. - RequestedMetricTags []profiledefinition.MetricTagConfig - // Metrics combines RequestedMetrics with profile metrics. - Metrics []profiledefinition.MetricsConfig - Metadata profiledefinition.MetadataConfig - // MetricTags combines RequestedMetricTags with profile metric tags. - MetricTags []profiledefinition.MetricTagConfig + RequestedMetricTags []profiledefinition.MetricTagConfig OidBatchSize int BulkMaxRepetitions uint32 ProfileProvider profile.Provider - ProfileTags []string ProfileName string ExtraTags []string InstanceTags []string @@ -188,7 +185,6 @@ type CheckConfig struct { DeviceIDTags []string ResolvedSubnetName string Namespace string - AutodetectProfile bool MinCollectionInterval time.Duration Network string @@ -203,61 +199,6 @@ type CheckConfig struct { PingConfig pinger.Config } -// SetProfile refreshes config based on profile -func (c *CheckConfig) SetProfile(profileName string) error { - profileConf := c.ProfileProvider.GetProfile(profileName) - if profileConf == nil { - return fmt.Errorf("unknown profile `%s`", profileName) - } - log.Debugf("Refreshing with profile `%s`", profileName) - c.ProfileName = profileName - - if log.ShouldLog(log.DebugLvl) { - profileDefJSON, _ := json.Marshal(profileConf.Definition) - log.Debugf("Profile content `%s`: %s", profileName, string(profileDefJSON)) - } - c.RebuildMetadataMetricsAndTags() - return nil -} - -// GetProfileDef returns the autodetected profile definition if there is one, -// the 
active profile if it exists, or nil if neither is true. -func (c *CheckConfig) GetProfileDef() *profiledefinition.ProfileDefinition { - if c.ProfileName != "" { - profile := c.ProfileProvider.GetProfile(c.ProfileName) - if profile != nil { - return &profile.Definition - } - log.Warnf("profile `%s` not found", c.ProfileName) - } - return nil -} - -// RebuildMetadataMetricsAndTags rebuilds c.Metrics, c.Metadata, c.MetricTags, -// and c.OidConfig by merging data from requested metrics/tags and the current -// profile. -func (c *CheckConfig) RebuildMetadataMetricsAndTags() { - c.Metrics = c.RequestedMetrics - c.MetricTags = c.RequestedMetricTags - c.ProfileTags = nil - profileDef := c.GetProfileDef() - if profileDef != nil { - c.ProfileTags = append(c.ProfileTags, "snmp_profile:"+c.ProfileName) - if profileDef.Device.Vendor != "" { - c.ProfileTags = append(c.ProfileTags, "device_vendor:"+profileDef.Device.Vendor) - } - c.ProfileTags = append(c.ProfileTags, profileDef.StaticTags...) - c.Metadata = updateMetadataDefinitionWithDefaults(profileDef.Metadata, c.CollectTopology) - c.Metrics = append(c.Metrics, profileDef.Metrics...) - c.MetricTags = append(c.MetricTags, profileDef.MetricTags...) 
- } else { - c.Metadata = updateMetadataDefinitionWithDefaults(nil, c.CollectTopology) - } - c.OidConfig.clean() - c.OidConfig.addScalarOids(c.parseScalarOids(c.Metrics, c.MetricTags, c.Metadata)) - c.OidConfig.addColumnOids(c.parseColumnOids(c.Metrics, c.Metadata)) -} - // UpdateDeviceIDAndTags updates DeviceID and DeviceIDTags func (c *CheckConfig) UpdateDeviceIDAndTags() { c.DeviceIDTags = coreutilsort.UniqInPlace(c.getDeviceIDTags()) @@ -310,8 +251,7 @@ func (c *CheckConfig) getDeviceIDTags() []string { // ToString used for logging CheckConfig without sensitive information func (c *CheckConfig) ToString() string { return fmt.Sprintf("CheckConfig: IPAddress=`%s`, Port=`%d`, SnmpVersion=`%s`, Timeout=`%d`, Retries=`%d`, "+ - "User=`%s`, AuthProtocol=`%s`, PrivProtocol=`%s`, ContextName=`%s`, OidConfig=`%#v`, "+ - "OidBatchSize=`%d`, ProfileTags=`%#v`", + "User=`%s`, AuthProtocol=`%s`, PrivProtocol=`%s`, ContextName=`%s`, OidBatchSize=`%d`, ProfileName=`%s`", c.IPAddress, c.Port, c.SnmpVersion, @@ -321,9 +261,8 @@ func (c *CheckConfig) ToString() string { c.AuthProtocol, c.PrivProtocol, c.ContextName, - c.OidConfig, c.OidBatchSize, - c.ProfileTags, + c.ProfileName, ) } @@ -499,11 +438,13 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data c.ProfileProvider = profiles // profile configs - profileName := instance.Profile - if profileName != "" || len(instance.Metrics) > 0 { - c.AutodetectProfile = false - } else { - c.AutodetectProfile = true + c.ProfileName = instance.Profile + if c.ProfileName == "" { + if len(instance.Metrics) > 0 { + c.ProfileName = ProfileNameInline + } else { + c.ProfileName = ProfileNameAuto + } } c.InstanceTags = instance.Tags @@ -525,15 +466,6 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data return nil, fmt.Errorf("validation errors: %s", strings.Join(errors, "\n")) } - if profileName != "" { - err = c.SetProfile(profileName) - if err != nil { - return nil, 
fmt.Errorf("failed to refresh with profile `%s`: %s", profileName, err) - } - } else { - c.RebuildMetadataMetricsAndTags() - } - // Ping configuration if instance.PingConfig.Enabled != nil { c.PingEnabled = bool(*instance.PingConfig.Enabled) @@ -645,24 +577,14 @@ func (c *CheckConfig) Copy() *CheckConfig { newConfig.PrivKey = c.PrivKey newConfig.ContextName = c.ContextName newConfig.ContextName = c.ContextName - newConfig.OidConfig = c.OidConfig newConfig.RequestedMetrics = make([]profiledefinition.MetricsConfig, len(c.RequestedMetrics)) copy(newConfig.RequestedMetrics, c.RequestedMetrics) - newConfig.Metrics = make([]profiledefinition.MetricsConfig, len(c.Metrics)) - copy(newConfig.Metrics, c.Metrics) - - // Metadata: shallow copy is enough since metadata is not modified. - // However, it might be fully replaced, see CheckConfig.SetProfile - newConfig.Metadata = c.Metadata newConfig.RequestedMetricTags = make([]profiledefinition.MetricTagConfig, len(c.RequestedMetricTags)) copy(newConfig.RequestedMetricTags, c.RequestedMetricTags) - newConfig.MetricTags = make([]profiledefinition.MetricTagConfig, len(c.MetricTags)) - copy(newConfig.MetricTags, c.MetricTags) newConfig.OidBatchSize = c.OidBatchSize newConfig.BulkMaxRepetitions = c.BulkMaxRepetitions newConfig.ProfileProvider = c.ProfileProvider - newConfig.ProfileTags = netutils.CopyStrings(c.ProfileTags) newConfig.ProfileName = c.ProfileName newConfig.ExtraTags = netutils.CopyStrings(c.ExtraTags) newConfig.InstanceTags = netutils.CopyStrings(c.InstanceTags) @@ -674,7 +596,6 @@ func (c *CheckConfig) Copy() *CheckConfig { newConfig.DeviceIDTags = netutils.CopyStrings(c.DeviceIDTags) newConfig.ResolvedSubnetName = c.ResolvedSubnetName newConfig.Namespace = c.Namespace - newConfig.AutodetectProfile = c.AutodetectProfile newConfig.MinCollectionInterval = c.MinCollectionInterval newConfig.InterfaceConfigs = c.InterfaceConfigs @@ -700,62 +621,6 @@ func (c *CheckConfig) IsDiscovery() bool { return c.Network != "" } -func 
(c *CheckConfig) parseScalarOids(metrics []profiledefinition.MetricsConfig, metricTags []profiledefinition.MetricTagConfig, metadataConfigs profiledefinition.MetadataConfig) []string { - var oids []string - for _, metric := range metrics { - oids = append(oids, metric.Symbol.OID) - } - for _, metricTag := range metricTags { - oids = append(oids, metricTag.Symbol.OID) - } - if c.CollectDeviceMetadata { - for resource, metadataConfig := range metadataConfigs { - if !profiledefinition.IsMetadataResourceWithScalarOids(resource) { - continue - } - for _, field := range metadataConfig.Fields { - oids = append(oids, field.Symbol.OID) - for _, symbol := range field.Symbols { - oids = append(oids, symbol.OID) - } - } - // we don't support tags for now for resource (e.g. device) based on scalar OIDs - // profile root level `metric_tags` (tags used for both metadata, metrics, service checks) - // can be used instead - } - } - return oids -} - -func (c *CheckConfig) parseColumnOids(metrics []profiledefinition.MetricsConfig, metadataConfigs profiledefinition.MetadataConfig) []string { - var oids []string - for _, metric := range metrics { - for _, symbol := range metric.Symbols { - oids = append(oids, symbol.OID) - } - for _, metricTag := range metric.MetricTags { - oids = append(oids, metricTag.Symbol.OID) - } - } - if c.CollectDeviceMetadata { - for resource, metadataConfig := range metadataConfigs { - if profiledefinition.IsMetadataResourceWithScalarOids(resource) { - continue - } - for _, field := range metadataConfig.Fields { - oids = append(oids, field.Symbol.OID) - for _, symbol := range field.Symbols { - oids = append(oids, symbol.OID) - } - } - for _, tagConfig := range metadataConfig.IDTags { - oids = append(oids, tagConfig.Symbol.OID) - } - } - } - return oids -} - func getSubnetFromTags(tags []string) (string, error) { for _, tag := range tags { // `autodiscovery_subnet` is set as tags in AD Template diff --git 
a/pkg/collector/corechecks/snmp/internal/checkconfig/config_oid.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_oid.go deleted file mode 100644 index d6992a3d87f797..00000000000000 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_oid.go +++ /dev/null @@ -1,50 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package checkconfig - -import "sort" - -// OidConfig holds configs for OIDs to fetch -type OidConfig struct { - // ScalarOids are all scalar oids to fetch - ScalarOids []string - // ColumnOids are all column oids to fetch - ColumnOids []string -} - -func (oc *OidConfig) addScalarOids(oidsToAdd []string) { - oc.ScalarOids = oc.addOidsIfNotPresent(oc.ScalarOids, oidsToAdd) -} - -func (oc *OidConfig) addColumnOids(oidsToAdd []string) { - oc.ColumnOids = oc.addOidsIfNotPresent(oc.ColumnOids, oidsToAdd) -} - -func (oc *OidConfig) addOidsIfNotPresent(configOids []string, oidsToAdd []string) []string { - for _, oidToAdd := range oidsToAdd { - if oidToAdd == "" { - continue - } - isAlreadyPresent := false - for _, oid := range configOids { - if oid == oidToAdd { - isAlreadyPresent = true - break - } - } - if isAlreadyPresent { - continue - } - configOids = append(configOids, oidToAdd) - } - sort.Strings(configOids) - return configOids -} - -func (oc *OidConfig) clean() { - oc.ScalarOids = nil - oc.ColumnOids = nil -} diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_oid_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_oid_test.go deleted file mode 100644 index 6fae3b75ce0242..00000000000000 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_oid_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the 
Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package checkconfig - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_oidConfig_addScalarOids(t *testing.T) { - conf := OidConfig{} - - assert.ElementsMatch(t, []string{}, conf.ScalarOids) - - conf.addScalarOids([]string{"1.1"}) - conf.addScalarOids([]string{"1.1"}) - conf.addScalarOids([]string{"1.2"}) - conf.addScalarOids([]string{"1.3"}) - conf.addScalarOids([]string{"1.0"}) - conf.addScalarOids([]string{""}) - assert.ElementsMatch(t, []string{"1.1", "1.2", "1.3", "1.0"}, conf.ScalarOids) -} - -func Test_oidConfig_addColumnOids(t *testing.T) { - conf := OidConfig{} - - assert.ElementsMatch(t, []string{}, conf.ColumnOids) - - conf.addColumnOids([]string{"1.1"}) - conf.addColumnOids([]string{"1.1"}) - conf.addColumnOids([]string{"1.2"}) - conf.addColumnOids([]string{"1.3"}) - conf.addColumnOids([]string{"1.0"}) - conf.addColumnOids([]string{""}) - assert.ElementsMatch(t, []string{"1.1", "1.2", "1.3", "1.0"}, conf.ColumnOids) -} diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go index ef9b8628da93a7..34653446818ecb 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go @@ -6,6 +6,7 @@ package checkconfig import ( + "github.com/stretchr/testify/require" "regexp" "testing" "time" @@ -155,106 +156,112 @@ bulk_max_repetitions: 20 assert.Equal(t, "my-privKey", config.PrivKey) assert.Equal(t, "my-contextName", config.ContextName) assert.Equal(t, []string{"device_namespace:default", "snmp_device:1.2.3.4", "device_ip:1.2.3.4", "device_id:default:1.2.3.4"}, config.GetStaticTags()) - expectedMetrics := []profiledefinition.MetricsConfig{ - {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.2.1", 
Name: "ifNumber"}}, - {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.2.2", Name: "ifNumber2"}, MetricTags: profiledefinition.MetricTagConfigList{ - {SymbolTag: "mytag1"}, - {SymbolTag: "mytag2"}, - }}, - {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.4.1.318.1.1.1.11.1.1.0", Name: "upsBasicStateOutputState", ScaleFactor: 10}, MetricType: profiledefinition.ProfileMetricTypeFlagStream, Options: profiledefinition.MetricsConfigOption{Placement: 5, MetricSuffix: "ReplaceBattery"}}, - { - Table: profiledefinition.SymbolConfig{ - OID: "1.3.6.1.2.1.2.2", - Name: "ifTable", - }, - Symbols: []profiledefinition.SymbolConfig{ - // ifInErrors defined in instance config with a different set of metric tags from the one defined - // in the imported profile - {OID: "1.3.6.1.2.1.2.2.1.14", Name: "ifInErrors"}, - {OID: "1.3.6.1.2.1.2.2.1.20", Name: "ifOutErrors", ScaleFactor: 3}, - }, - MetricTags: []profiledefinition.MetricTagConfig{ - {Tag: "if_index", Index: 1}, - {Tag: "if_desc", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.2.2.1.2", Name: "ifDescr"}, - IndexTransform: []profiledefinition.MetricIndexTransform{ - { - Start: 1, - End: 3, - }, - { - Start: 4, - End: 6, - }, - }, + assert.True(t, config.ProfileProvider.HasProfile("f5-big-ip")) + assert.Equal(t, "default:1.2.3.4", config.DeviceID) + assert.Equal(t, []string{"device_namespace:default", "snmp_device:1.2.3.4"}, config.DeviceIDTags) + assert.Equal(t, "127.0.0.0/30", config.ResolvedSubnetName) + assert.Equal(t, "f5-big-ip", config.ProfileName) + + t.Run("BuildProfile", func(t *testing.T) { + expectedMetrics := []profiledefinition.MetricsConfig{ + {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.2.1", Name: "ifNumber"}}, + {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.2.2", Name: "ifNumber2"}, MetricTags: profiledefinition.MetricTagConfigList{ + {SymbolTag: "mytag1"}, + {SymbolTag: "mytag2"}, + }}, + {Symbol: profiledefinition.SymbolConfig{OID: 
"1.3.6.1.4.1.318.1.1.1.11.1.1.0", Name: "upsBasicStateOutputState", ScaleFactor: 10}, MetricType: profiledefinition.ProfileMetricTypeFlagStream, Options: profiledefinition.MetricsConfigOption{Placement: 5, MetricSuffix: "ReplaceBattery"}}, + { + Table: profiledefinition.SymbolConfig{ + OID: "1.3.6.1.2.1.2.2", + Name: "ifTable", }, - {Tag: "ipversion", Index: 1, Mapping: map[string]string{ - "0": "unknown", - "1": "ipv4", - "2": "ipv6", - "3": "ipv4z", - "4": "ipv6z", - "16": "dns", - }}, - {Tag: "if_type", - Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.2.2.1.3", Name: "ifType"}, - Mapping: map[string]string{ - "1": "other", - "2": "regular1822", - "3": "hdh1822", - "4": "ddn-x25", - "29": "ultra", - }}, - { - Symbol: profiledefinition.SymbolConfigCompat{ - Name: "cpiPduName", - OID: "1.2.3.4.8.1.2", + Symbols: []profiledefinition.SymbolConfig{ + // ifInErrors defined in instance config with a different set of metric tags from the one defined + // in the imported profile + {OID: "1.3.6.1.2.1.2.2.1.14", Name: "ifInErrors"}, + {OID: "1.3.6.1.2.1.2.2.1.20", Name: "ifOutErrors", ScaleFactor: 3}, + }, + MetricTags: []profiledefinition.MetricTagConfig{ + {Tag: "if_index", Index: 1}, + {Tag: "if_desc", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.2.2.1.2", Name: "ifDescr"}, + IndexTransform: []profiledefinition.MetricIndexTransform{ + { + Start: 1, + End: 3, + }, + { + Start: 4, + End: 6, + }, + }, }, - Match: "(\\w)(\\w+)", - Pattern: regexp.MustCompile(`(\w)(\w+)`), - Tags: map[string]string{ - "prefix": "\\1", - "suffix": "\\2", + {Tag: "ipversion", Index: 1, Mapping: map[string]string{ + "0": "unknown", + "1": "ipv4", + "2": "ipv6", + "3": "ipv4z", + "4": "ipv6z", + "16": "dns", }}, + {Tag: "if_type", + Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.2.2.1.3", Name: "ifType"}, + Mapping: map[string]string{ + "1": "other", + "2": "regular1822", + "3": "hdh1822", + "4": "ddn-x25", + "29": "ultra", + }}, + { + Symbol: 
profiledefinition.SymbolConfigCompat{ + Name: "cpiPduName", + OID: "1.2.3.4.8.1.2", + }, + Match: "(\\w)(\\w+)", + Pattern: regexp.MustCompile(`(\w)(\w+)`), + Tags: map[string]string{ + "prefix": "\\1", + "suffix": "\\2", + }}, + }, }, - }, - {Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4", Name: "aGlobalMetric"}}, - } - expectedMetrics = append(expectedMetrics, profiledefinition.MetricsConfig{Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}) - expectedMetrics = append(expectedMetrics, profile.FixtureProfileDefinitionMap()["f5-big-ip"].Definition.Metrics...) + {Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4", Name: "aGlobalMetric"}}, + } + expectedMetrics = append(expectedMetrics, profiledefinition.MetricsConfig{Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}) + expectedMetrics = append(expectedMetrics, profile.FixtureProfileDefinitionMap()["f5-big-ip"].Definition.Metrics...) - expectedMetricTags := []profiledefinition.MetricTagConfig{ - {Tag: "my_symbol", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}}, - {Tag: "my_symbol_mapped", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}, Mapping: map[string]string{"1": "one", "2": "two"}}, - { - Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}, - Match: "(\\w)(\\w+)", - Pattern: regexp.MustCompile(`(\w)(\w+)`), - Tags: map[string]string{ - "prefix": "\\1", - "suffix": "\\2", + expectedMetricTags := []profiledefinition.MetricTagConfig{ + {Tag: "my_symbol", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}}, + {Tag: "my_symbol_mapped", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}, Mapping: map[string]string{"1": "one", "2": "two"}}, + { + Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}, + Match: "(\\w)(\\w+)", + Pattern: regexp.MustCompile(`(\w)(\w+)`), 
+ Tags: map[string]string{ + "prefix": "\\1", + "suffix": "\\2", + }, }, - }, - { - Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.5.0", Name: "sysName"}, - Match: "(\\w)(\\w+)", - Pattern: regexp.MustCompile(`(\w)(\w+)`), - Tags: map[string]string{ - "some_tag": "some_tag_value", - "prefix": "\\1", - "suffix": "\\2", + { + Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.5.0", Name: "sysName"}, + Match: "(\\w)(\\w+)", + Pattern: regexp.MustCompile(`(\w)(\w+)`), + Tags: map[string]string{ + "some_tag": "some_tag_value", + "prefix": "\\1", + "suffix": "\\2", + }, }, - }, - {Tag: "snmp_host", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.5.0", Name: "sysName"}}, - } + {Tag: "snmp_host", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.5.0", Name: "sysName"}}, + } - assert.Equal(t, expectedMetrics, config.Metrics) - assert.Equal(t, expectedMetricTags, config.MetricTags) - assert.Equal(t, []string{"snmp_profile:f5-big-ip", "device_vendor:f5", "static_tag:from_profile_root", "static_tag:from_base_profile"}, config.ProfileTags) - assert.True(t, config.ProfileProvider.HasProfile("f5-big-ip")) - assert.Equal(t, "default:1.2.3.4", config.DeviceID) - assert.Equal(t, []string{"device_namespace:default", "snmp_device:1.2.3.4"}, config.DeviceIDTags) - assert.Equal(t, "127.0.0.0/30", config.ResolvedSubnetName) - assert.Equal(t, false, config.AutodetectProfile) + profile, err := config.BuildProfile("") + require.NoError(t, err) + + assert.Equal(t, expectedMetrics, profile.Metrics) + assert.Equal(t, expectedMetricTags, profile.MetricTags) + assert.Equal(t, []string{"snmp_profile:f5-big-ip", "device_vendor:f5", "static_tag:from_profile_root", "static_tag:from_base_profile"}, profile.StaticTags) + }) } func TestDiscoveryConfigurations(t *testing.T) { @@ -310,6 +317,10 @@ profiles: assert.Nil(t, err) assert.Equal(t, []string{"device_namespace:default", "snmp_device:172.26.0.2", "device_ip:172.26.0.2", 
"device_id:default:172.26.0.2"}, config.GetStaticTags()) + + profile, err := config.BuildProfile("") + require.NoError(t, err) + metrics := []profiledefinition.MetricsConfig{ {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}, {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.7.1.0", Name: "IAmACounter32"}}, @@ -318,10 +329,8 @@ profiles: {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.88.1.1.1.0", Name: "IAmAnInteger"}}, } - metricsTags := []profiledefinition.MetricTagConfig(nil) - - assert.Equal(t, metrics, config.Metrics) - assert.Equal(t, metricsTags, config.MetricTags) + assert.Equal(t, metrics, profile.Metrics) + assert.Empty(t, profile.MetricTags) } func TestInlineProfileConfiguration(t *testing.T) { @@ -357,30 +366,31 @@ profiles: name: myMetric `) config, err := NewCheckConfig(rawInstanceConfig, rawInitConfig) + require.NoError(t, err) - assert.Nil(t, err) assert.Equal(t, []string{"device_namespace:default", "snmp_device:1.2.3.4", "device_ip:1.2.3.4", "device_id:default:1.2.3.4"}, config.GetStaticTags()) - metrics := []profiledefinition.MetricsConfig{ - {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}, - {MIB: "MY-PROFILE-MIB", Symbol: profiledefinition.SymbolConfig{OID: "1.4.5", Name: "myMetric"}, MetricType: profiledefinition.ProfileMetricTypeGauge}, - } - - metricsTags := []profiledefinition.MetricTagConfig{ - {Tag: "snmp_host", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.5.0", Name: "sysName"}}, - } - assert.Equal(t, "123", config.CommunityString) - assert.Equal(t, metrics, config.Metrics) - assert.Equal(t, metricsTags, config.MetricTags) assert.True(t, config.ProfileProvider.HasProfile("f5-big-ip")) assert.True(t, config.ProfileProvider.HasProfile("inline-profile")) assert.Equal(t, "default:1.2.3.4", config.DeviceID) assert.Equal(t, []string{"device_namespace:default", "snmp_device:1.2.3.4"}, config.DeviceIDTags) - 
assert.Equal(t, false, config.AutodetectProfile) assert.Equal(t, 3600, config.DiscoveryInterval) assert.Equal(t, 3, config.DiscoveryAllowedFailures) assert.Equal(t, 5, config.DiscoveryWorkers) assert.Equal(t, 5, config.Workers) + + metrics := []profiledefinition.MetricsConfig{ + {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}, + {MIB: "MY-PROFILE-MIB", Symbol: profiledefinition.SymbolConfig{OID: "1.4.5", Name: "myMetric"}, MetricType: profiledefinition.ProfileMetricTypeGauge}, + } + + metricsTags := []profiledefinition.MetricTagConfig{ + {Tag: "snmp_host", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.5.0", Name: "sysName"}}, + } + profile, err := config.BuildProfile("") + require.NoError(t, err) + assert.Equal(t, metrics, profile.Metrics) + assert.Equal(t, metricsTags, profile.MetricTags) } func TestDefaultConfigurations(t *testing.T) { @@ -401,16 +411,17 @@ community_string: abc assert.Equal(t, uint16(161), config.Port) assert.Equal(t, 2, config.Timeout) assert.Equal(t, 3, config.Retries) - metrics := []profiledefinition.MetricsConfig{{Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}} - - var metricsTags []profiledefinition.MetricTagConfig - assert.Equal(t, metrics, config.Metrics) - assert.Equal(t, metricsTags, config.MetricTags) - // assert.Equal(t, 2, len(config.Profiles)) assert.True(t, config.ProfileProvider.HasProfile("f5-big-ip")) assert.True(t, config.ProfileProvider.HasProfile("another_profile")) assert.Equal(t, profile.FixtureProfileDefinitionMap()["f5-big-ip"].Definition.Metrics, config.ProfileProvider.GetProfile("f5-big-ip").Definition.Metrics) + + profile, err := config.BuildProfile("") + require.NoError(t, err) + + metrics := []profiledefinition.MetricsConfig{{Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}} + assert.Equal(t, metrics, profile.Metrics) + assert.Empty(t, profile.MetricTags) } func 
TestPortConfiguration(t *testing.T) { @@ -575,14 +586,17 @@ global_metrics: name: aGlobalMetric `) config, err := NewCheckConfig(rawInstanceConfig, rawInitConfig) - assert.Nil(t, err) + require.NoError(t, err) + + profile, err := config.BuildProfile("") + require.NoError(t, err) metrics := []profiledefinition.MetricsConfig{ {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.2.1", Name: "ifNumber"}}, {Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4", Name: "aGlobalMetric"}}, {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}, } - assert.Equal(t, metrics, config.Metrics) + assert.Equal(t, metrics, profile.Metrics) } func TestUseGlobalMetricsFalse(t *testing.T) { @@ -606,13 +620,16 @@ global_metrics: name: aGlobalMetric `) config, err := NewCheckConfig(rawInstanceConfig, rawInitConfig) - assert.Nil(t, err) + require.NoError(t, err) + + profile, err := config.BuildProfile("") + require.NoError(t, err) metrics := []profiledefinition.MetricsConfig{ {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.2.1", Name: "aInstanceMetric"}}, {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.3.0", Name: "sysUpTimeInstance"}}, } - assert.Equal(t, metrics, config.Metrics) + assert.Equal(t, metrics, profile.Metrics) } func Test_NewCheckConfig_errors(t *testing.T) { @@ -624,23 +641,6 @@ func Test_NewCheckConfig_errors(t *testing.T) { rawInitConfig []byte expectedErrors []string }{ - { - name: "unknown profile", - // language=yaml - rawInstanceConfig: []byte(` -ip_address: 1.2.3.4 -profile: does-not-exist -`), - // language=yaml - rawInitConfig: []byte(` -profiles: - f5-big-ip: - definition_file: f5-big-ip.yaml -`), - expectedErrors: []string{ - "failed to refresh with profile `does-not-exist`: unknown profile `does-not-exist`", - }, - }, { name: "validation errors", // language=yaml @@ -699,6 +699,7 @@ network_address: 10.0.0.0/xx t.Run(tt.name, func(t *testing.T) { _, err := NewCheckConfig(tt.rawInstanceConfig, 
tt.rawInitConfig) for _, errStr := range tt.expectedErrors { + require.NotNil(t, err, "expected error %q", errStr) assert.Contains(t, err.Error(), errStr) } }) @@ -806,254 +807,6 @@ func Test_snmpConfig_getDeviceIDTags(t *testing.T) { assert.Equal(t, expectedTags, actualTags) } -func Test_snmpConfig_setProfile(t *testing.T) { - metrics := []profiledefinition.MetricsConfig{ - {Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}}, - { - Symbols: []profiledefinition.SymbolConfig{ - { - OID: "1.2.3.4.6", - Name: "abc", - }, - }, - MetricTags: profiledefinition.MetricTagConfigList{ - profiledefinition.MetricTagConfig{ - Symbol: profiledefinition.SymbolConfigCompat{ - OID: "1.2.3.4.7", - }, - }, - }, - }, - } - profile1 := profiledefinition.ProfileDefinition{ - Device: profiledefinition.DeviceMeta{ - Vendor: "a-vendor", - }, - Metrics: metrics, - MetricTags: []profiledefinition.MetricTagConfig{ - {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: "sysLocation"}}, - }, - Metadata: profiledefinition.MetadataConfig{ - "device": { - Fields: map[string]profiledefinition.MetadataField{ - "description": { - Symbol: profiledefinition.SymbolConfig{ - OID: "1.3.6.1.2.1.1.99.3.0", - Name: "sysDescr", - }, - }, - "name": { - Symbols: []profiledefinition.SymbolConfig{ - { - OID: "1.3.6.1.2.1.1.99.1.0", - Name: "symbol1", - }, - { - OID: "1.3.6.1.2.1.1.99.2.0", - Name: "symbol2", - }, - }, - }, - }, - }, - "interface": { - Fields: map[string]profiledefinition.MetadataField{ - "oper_status": { - Symbol: profiledefinition.SymbolConfig{ - OID: "1.3.6.1.2.1.2.2.1.99", - Name: "someIfSymbol", - }, - }, - }, - IDTags: profiledefinition.MetricTagConfigList{ - { - Tag: "interface", - Symbol: profiledefinition.SymbolConfigCompat{ - OID: "1.3.6.1.2.1.31.1.1.1.1", - Name: "ifName", - }, - }, - }, - }, - }, - SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"}, - } - profile2 := 
profiledefinition.ProfileDefinition{ - Device: profiledefinition.DeviceMeta{Vendor: "b-vendor"}, - Metrics: []profiledefinition.MetricsConfig{{Symbol: profiledefinition.SymbolConfig{OID: "2.3.4.5.6.1", Name: "b-metric"}}}, - MetricTags: []profiledefinition.MetricTagConfig{ - {Tag: "btag", Symbol: profiledefinition.SymbolConfigCompat{OID: "2.3.4.5.6.2", Name: "b-tag-name"}}, - }, - Metadata: profiledefinition.MetadataConfig{ - "device": { - Fields: map[string]profiledefinition.MetadataField{ - "b-description": { - Symbol: profiledefinition.SymbolConfig{ - OID: "2.3.4.5.6.3", - Name: "sysDescr", - }, - }, - "b-name": { - Symbols: []profiledefinition.SymbolConfig{ - { - OID: "2.3.4.5.6.4", - Name: "b-symbol1", - }, - { - OID: "2.3.4.5.6.5", - Name: "b-symbol2", - }, - }, - }, - }, - }, - "interface": { - Fields: map[string]profiledefinition.MetadataField{ - "oper_status": { - Symbol: profiledefinition.SymbolConfig{ - OID: "2.3.4.5.6.6", - Name: "b-someIfSymbol", - }, - }, - }, - IDTags: profiledefinition.MetricTagConfigList{ - { - Tag: "b-interface", - Symbol: profiledefinition.SymbolConfigCompat{ - OID: "2.3.4.5.6.7", - Name: "b-ifName", - }, - }, - }, - }, - }, - SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"}, - } - - mockProfiles := profile.StaticProvider(profile.ProfileConfigMap{ - "profile1": profile.ProfileConfig{ - Definition: profile1, - }, - "profile2": profile.ProfileConfig{ - Definition: profile2, - }, - }) - c := &CheckConfig{ - IPAddress: "1.2.3.4", - ProfileProvider: mockProfiles, - } - err := c.SetProfile("f5") - assert.EqualError(t, err, "unknown profile `f5`") - - err = c.SetProfile("profile1") - assert.NoError(t, err) - - assert.Equal(t, "profile1", c.ProfileName) - assert.Equal(t, &profile1, c.GetProfileDef()) - assert.Equal(t, metrics, c.Metrics) - assert.Equal(t, []profiledefinition.MetricTagConfig{ - {Tag: "location", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.3.6.1.2.1.1.6.0", Name: "sysLocation"}}, - }, 
c.MetricTags) - assert.Equal(t, OidConfig{ - ScalarOids: []string{"1.2.3.4.5", "1.3.6.1.2.1.1.6.0"}, - ColumnOids: []string{"1.2.3.4.6", "1.2.3.4.7"}, - }, c.OidConfig) - assert.Equal(t, []string{"snmp_profile:profile1", "device_vendor:a-vendor"}, c.ProfileTags) - - c = &CheckConfig{ - IPAddress: "1.2.3.4", - ProfileProvider: mockProfiles, - CollectDeviceMetadata: true, - CollectTopology: false, - } - err = c.SetProfile("profile1") - assert.NoError(t, err) - assert.Equal(t, OidConfig{ - ScalarOids: []string{ - "1.2.3.4.5", - "1.3.6.1.2.1.1.6.0", - "1.3.6.1.2.1.1.99.1.0", - "1.3.6.1.2.1.1.99.2.0", - "1.3.6.1.2.1.1.99.3.0", - }, - ColumnOids: []string{ - "1.2.3.4.6", - "1.2.3.4.7", - "1.3.6.1.2.1.2.2.1.99", - "1.3.6.1.2.1.31.1.1.1.1", - "1.3.6.1.2.1.4.20.1.2", - "1.3.6.1.2.1.4.20.1.3", - }, - }, c.OidConfig) - - // With metadata disabled - c.CollectDeviceMetadata = false - err = c.SetProfile("profile1") - assert.NoError(t, err) - assert.Equal(t, OidConfig{ - ScalarOids: []string{ - "1.2.3.4.5", - "1.3.6.1.2.1.1.6.0", - }, - ColumnOids: []string{ - "1.2.3.4.6", - "1.2.3.4.7", - }, - }, c.OidConfig) - - c = &CheckConfig{ - IPAddress: "1.2.3.4", - ProfileProvider: mockProfiles, - CollectDeviceMetadata: true, - CollectTopology: false, - } - c.RequestedMetrics = append(c.RequestedMetrics, - profiledefinition.MetricsConfig{Symbol: profiledefinition.SymbolConfig{OID: "3.1", Name: "global-metric"}}) - c.RequestedMetricTags = append(c.RequestedMetricTags, - profiledefinition.MetricTagConfig{Tag: "global-tag", Symbol: profiledefinition.SymbolConfigCompat{OID: "3.2", Name: "globalSymbol"}}) - err = c.SetProfile("profile1") - assert.NoError(t, err) - assert.Equal(t, OidConfig{ - ScalarOids: []string{ - "1.2.3.4.5", - "1.3.6.1.2.1.1.6.0", - "1.3.6.1.2.1.1.99.1.0", - "1.3.6.1.2.1.1.99.2.0", - "1.3.6.1.2.1.1.99.3.0", - "3.1", - "3.2", - }, - ColumnOids: []string{ - "1.2.3.4.6", - "1.2.3.4.7", - "1.3.6.1.2.1.2.2.1.99", - "1.3.6.1.2.1.31.1.1.1.1", - "1.3.6.1.2.1.4.20.1.2", - 
"1.3.6.1.2.1.4.20.1.3", - }, - }, c.OidConfig) - err = c.SetProfile("profile2") - assert.NoError(t, err) - assert.Equal(t, OidConfig{ - ScalarOids: []string{ - "2.3.4.5.6.1", - "2.3.4.5.6.2", - "2.3.4.5.6.3", - "2.3.4.5.6.4", - "2.3.4.5.6.5", - "3.1", - "3.2", - }, - ColumnOids: []string{ - "1.3.6.1.2.1.4.20.1.2", - "1.3.6.1.2.1.4.20.1.3", - "2.3.4.5.6.6", - "2.3.4.5.6.7", - }, - }, c.OidConfig) - -} - func Test_getSubnetFromTags(t *testing.T) { subnet, err := getSubnetFromTags([]string{"aa", "bb"}) assert.Equal(t, "", subnet) @@ -1913,10 +1666,6 @@ func TestCheckConfig_Copy(t *testing.T) { PrivProtocol: "des", PrivKey: "123", ContextName: "", - OidConfig: OidConfig{ - ScalarOids: []string{"1.2.3"}, - ColumnOids: []string{"1.2.3", "2.3.4"}, - }, RequestedMetrics: []profiledefinition.MetricsConfig{ { Symbol: profiledefinition.SymbolConfig{ @@ -1928,17 +1677,6 @@ func TestCheckConfig_Copy(t *testing.T) { RequestedMetricTags: []profiledefinition.MetricTagConfig{ {Tag: "my_symbol", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}}, }, - Metrics: []profiledefinition.MetricsConfig{ - { - Symbol: profiledefinition.SymbolConfig{ - OID: "1.2", - Name: "abc", - }, - }, - }, - MetricTags: []profiledefinition.MetricTagConfig{ - {Tag: "my_symbol", Symbol: profiledefinition.SymbolConfigCompat{OID: "1.2.3", Name: "mySymbol"}}, - }, OidBatchSize: 10, BulkMaxRepetitions: 10, ProfileProvider: profile.StaticProvider(profile.ProfileConfigMap{"f5-big-ip": profile.ProfileConfig{ @@ -1946,7 +1684,6 @@ func TestCheckConfig_Copy(t *testing.T) { Device: profiledefinition.DeviceMeta{Vendor: "f5"}, }, }}), - ProfileTags: []string{"profile_tag:atag"}, ProfileName: "f5", ExtraTags: []string{"ExtraTags:tag"}, InstanceTags: []string{"InstanceTags:tag"}, @@ -1956,7 +1693,6 @@ func TestCheckConfig_Copy(t *testing.T) { DeviceID: "123", DeviceIDTags: []string{"DeviceIDTags:tag"}, ResolvedSubnetName: "1.2.3.4/28", - AutodetectProfile: true, MinCollectionInterval: 120, } 
configCopy := config.Copy() @@ -1965,9 +1701,6 @@ func TestCheckConfig_Copy(t *testing.T) { assert.NotSame(t, &config.RequestedMetrics, &configCopy.RequestedMetrics) assert.NotSame(t, &config.RequestedMetricTags, &configCopy.RequestedMetricTags) - assert.NotSame(t, &config.Metrics, &configCopy.Metrics) - assert.NotSame(t, &config.MetricTags, &configCopy.MetricTags) - assert.NotSame(t, &config.ProfileTags, &configCopy.ProfileTags) assert.NotSame(t, &config.ExtraTags, &configCopy.ExtraTags) assert.NotSame(t, &config.InstanceTags, &configCopy.InstanceTags) assert.NotSame(t, &config.DeviceIDTags, &configCopy.DeviceIDTags) diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go index 6f57817fddec11..90fea616b8c92e 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" "reflect" "runtime" "strings" @@ -54,6 +55,60 @@ const ( checkDurationThreshold = 30 // Thirty seconds ) +type profileCache struct { + sysObjectID string + timestamp time.Time + profile *profiledefinition.ProfileDefinition + err error + scalarOIDs []string + columnOIDs []string +} + +// GetProfile returns the cached profile, or an empty profile if the cache is empty. +// Use this when you need to make sure you have *some* profile. 
+func (pc *profileCache) GetProfile() profiledefinition.ProfileDefinition { + if pc.profile == nil { + return profiledefinition.ProfileDefinition{ + Metadata: make(profiledefinition.MetadataConfig), + } + } + return *pc.profile +} + +func (pc *profileCache) Update(sysObjectID string, now time.Time, config *checkconfig.CheckConfig) (profiledefinition.ProfileDefinition, error) { + if pc.IsOutdated(sysObjectID, config.ProfileName, config.ProfileProvider.LastUpdated()) { + // we cache the value even if there's an error, because an error indicates that + // the ProfileProvider couldn't find a match for either config.ProfileName or + // the given sysObjectID, and we're going to have the same error if we call this + // again without either the sysObjectID or the ProfileProvider changing. + pc.sysObjectID = sysObjectID + pc.timestamp = now + profile, err := config.BuildProfile(sysObjectID) + pc.profile = &profile + pc.err = err + pc.scalarOIDs, pc.columnOIDs = pc.profile.SplitOIDs(config.CollectDeviceMetadata) + } + return pc.GetProfile(), pc.err +} + +func (pc *profileCache) IsOutdated(sysObjectID string, profileName string, lastUpdate time.Time) bool { + if pc.profile == nil { + return true + } + if profileName == checkconfig.ProfileNameInline { + // inline profiles never change, so if we have a profile it's up-to-date. + return false + } + if profileName == checkconfig.ProfileNameAuto && pc.sysObjectID != sysObjectID { + // If we're auto-detecting profiles and the sysObjectID has changed, we're out of date. + return true + } + // If we get here then either we're auto-detecting but the sysobjectid hasn't + // changed, or we have a static name; either way we're out of date if and only + // if the profile provider has updated. 
+ return pc.timestamp.Before(lastUpdate) +} + // DeviceCheck hold info necessary to collect info for a single device type DeviceCheck struct { config *checkconfig.CheckConfig @@ -67,6 +122,7 @@ type DeviceCheck struct { interfaceBandwidthState report.InterfaceBandwidthState cacheKey string agentConfig config.Component + profileCache profileCache } const cacheKeyPrefix = "snmp-tags" @@ -99,6 +155,10 @@ func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFa } d.readTagsFromCache() + if _, err := d.profileCache.Update("", time.Now(), d.config); err != nil { + // This could happen e.g. if the config references a profile that hasn't been loaded yet. + _ = log.Warnf("failed to refresh profile cache: %s", err) + } return &d, nil } @@ -162,7 +222,7 @@ func (d *DeviceCheck) Run(collectionTime time.Time) error { var deviceStatus metadata.DeviceStatus var pingStatus metadata.DeviceStatus - deviceReachable, dynamicTags, values, checkErr := d.getValuesAndTags() + deviceReachable, profile, dynamicTags, values, checkErr := d.getValuesAndTags() tags := utils.CopyStrings(staticTags) if checkErr != nil { @@ -182,7 +242,7 @@ func (d *DeviceCheck) Run(collectionTime time.Time) error { d.sender.Gauge(deviceReachableMetric, utils.BoolToFloat64(deviceReachable), metricTags) d.sender.Gauge(deviceUnreachableMetric, utils.BoolToFloat64(!deviceReachable), metricTags) if values != nil { - d.sender.ReportMetrics(d.config.Metrics, values, metricTags, d.config.DeviceID) + d.sender.ReportMetrics(profile.Metrics, values, metricTags, d.config.DeviceID) } // Get a system appropriate ping check @@ -231,7 +291,8 @@ func (d *DeviceCheck) Run(collectionTime time.Time) error { deviceDiagnosis := d.diagnoses.Report() - d.sender.ReportNetworkDeviceMetadata(d.config, values, deviceMetadataTags, collectionTime, deviceStatus, pingStatus, deviceDiagnosis) + d.sender.ReportNetworkDeviceMetadata(d.config, profile, values, deviceMetadataTags, collectionTime, + deviceStatus, pingStatus, 
deviceDiagnosis) } d.submitTelemetryMetrics(startTime, metricTags) @@ -255,7 +316,11 @@ func (d *DeviceCheck) buildExternalTags() []string { return configUtils.GetConfiguredTags(d.agentConfig, false) } -func (d *DeviceCheck) getValuesAndTags() (bool, []string, *valuestore.ResultValueStore, error) { +// getValuesAndTags build (or fetches from cache) a profile describing all the +// metrics, tags, etc. to be fetched for this device, fetches the resulting +// values, and returns (reachable, profile, tags, values, error). In the event +// of an error, the returned profile will be the last cached profile. +func (d *DeviceCheck) getValuesAndTags() (bool, profiledefinition.ProfileDefinition, []string, *valuestore.ResultValueStore, error) { var deviceReachable bool var checkErrors []string var tags []string @@ -264,7 +329,8 @@ func (d *DeviceCheck) getValuesAndTags() (bool, []string, *valuestore.ResultValu connErr := d.session.Connect() if connErr != nil { d.diagnoses.Add("error", "SNMP_FAILED_TO_OPEN_CONNECTION", "Agent failed to open connection.") - return false, tags, nil, fmt.Errorf("snmp connection error: %s", connErr) + // cannot connect -> use cached profile + return false, d.profileCache.GetProfile(), tags, nil, fmt.Errorf("snmp connection error: %s", connErr) } defer func() { err := d.session.Close() @@ -287,15 +353,16 @@ func (d *DeviceCheck) getValuesAndTags() (bool, []string, *valuestore.ResultValu } } - err = d.detectMetricsToMonitor(d.session) + profile, err := d.detectMetricsToMonitor(d.session) if err != nil { d.diagnoses.Add("error", "SNMP_FAILED_TO_DETECT_PROFILE", "Agent failed to detect a profile for this network device.") checkErrors = append(checkErrors, fmt.Sprintf("failed to autodetect profile: %s", err)) } - tags = append(tags, d.config.ProfileTags...) + tags = append(tags, profile.StaticTags...) 
- valuesStore, err := fetch.Fetch(d.session, d.config) + valuesStore, err := fetch.Fetch(d.session, d.profileCache.scalarOIDs, d.profileCache.columnOIDs, d.config.OidBatchSize, + d.config.BulkMaxRepetitions) if log.ShouldLog(log.DebugLvl) { log.Debugf("fetched values: %v", valuestore.ResultValueStoreAsString(valuesStore)) } @@ -303,37 +370,38 @@ func (d *DeviceCheck) getValuesAndTags() (bool, []string, *valuestore.ResultValu if err != nil { checkErrors = append(checkErrors, fmt.Sprintf("failed to fetch values: %s", err)) } else { - tags = append(tags, d.sender.GetCheckInstanceMetricTags(d.config.MetricTags, valuesStore)...) + tags = append(tags, d.sender.GetCheckInstanceMetricTags(profile.MetricTags, valuesStore)...) } var joinedError error if len(checkErrors) > 0 { joinedError = errors.New(strings.Join(checkErrors, "; ")) } - return deviceReachable, tags, valuesStore, joinedError + return deviceReachable, profile, tags, valuesStore, joinedError } -func (d *DeviceCheck) detectMetricsToMonitor(sess session.Session) error { - if d.config.AutodetectProfile { +func (d *DeviceCheck) getSysObjectID(sess session.Session) (string, error) { + if d.config.ProfileName == checkconfig.ProfileNameAuto { // detect using sysObjectID sysObjectID, err := session.FetchSysObjectID(sess) if err != nil { - return fmt.Errorf("failed to fetch sysobjectid: %s", err) - } - profile, err := d.config.ProfileProvider.GetProfileNameForSysObjectID(sysObjectID) - if err != nil { - return fmt.Errorf("failed to get profile sys object id for `%s`: %s", sysObjectID, err) - } - if profile != d.config.ProfileName { - log.Debugf("detected profile change: %s -> %s", d.config.ProfileName, profile) - err = d.config.SetProfile(profile) - if err != nil { - // Should not happen since the profile is one of those we matched in GetProfileNameForSysObjectID - return fmt.Errorf("failed to refresh with profile `%s` detected using sysObjectID `%s`: %s", profile, sysObjectID, err) - } + return "", fmt.Errorf("failed 
to fetch sysobjectid: %w", err) } + return sysObjectID, nil + } + return "", nil +} + +func (d *DeviceCheck) detectMetricsToMonitor(sess session.Session) (profiledefinition.ProfileDefinition, error) { + sysObjectID, err := d.getSysObjectID(sess) + if err != nil { + return d.profileCache.GetProfile(), err + } + profile, err := d.profileCache.Update(sysObjectID, time.Now(), d.config) + if err != nil { + return profile, fmt.Errorf("failed to refresh profile cache: %w", err) } - return nil + return profile, nil } func (d *DeviceCheck) submitTelemetryMetrics(startTime time.Time, tags []string) { diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index 92a9eda81400f0..cecad7db1b9269 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -70,7 +70,7 @@ profiles: deviceCk.SetSender(report.NewMetricSender(sender, "", nil, report.MakeInterfaceBandwidthState())) - (sess. + sess. SetStr("1.3.6.1.2.1.1.1.0", "my_desc"). SetObj("1.3.6.1.2.1.1.2.0", "1.3.6.1.4.1.3375.2.1.3.4.1"). SetTime("1.3.6.1.2.1.1.3.0", 20). @@ -96,7 +96,7 @@ profiles: // f5-specific sysStatMemoryTotal SetInt("1.3.6.1.4.1.3375.2.1.1.2.1.44.0", 30). 
// Fake metric specific to another_profile - SetInt("1.3.6.1.2.1.1.999.0", 100)) + SetInt("1.3.6.1.2.1.1.999.0", 100) err = deviceCk.Run(time.Now()) assert.Nil(t, err) @@ -133,8 +133,8 @@ profiles: sender.AssertMetricNotTaggedWith(t, "Gauge", "snmp.sysStatMemoryTotal", []string{"unknown_symbol:100"}) // f5 has 5 metrics, 2 tags - assert.Len(t, deviceCk.config.Metrics, 5) - assert.Len(t, deviceCk.config.MetricTags, 2) + assert.Len(t, deviceCk.profileCache.profile.Metrics, 5) + assert.Len(t, deviceCk.profileCache.profile.MetricTags, 2) sender.ResetCalls() @@ -164,8 +164,8 @@ profiles: sender.AssertMetricNotTaggedWith(t, "Gauge", "snmp.anotherMetric", []string{"some_tag:some_tag_value"}) // Check that we replaced the metrics, instead of just adding to them - assert.Len(t, deviceCk.config.Metrics, 2) - assert.Len(t, deviceCk.config.MetricTags, 2) + assert.Len(t, deviceCk.profileCache.profile.Metrics, 2) + assert.Len(t, deviceCk.profileCache.profile.MetricTags, 2) } func TestProfileDetectionPreservesGlobals(t *testing.T) { @@ -779,8 +779,8 @@ profiles: sender.AssertMetricNotTaggedWith(t, "Gauge", "snmp.sysStatMemoryTotal", []string{"unknown_symbol:100"}) // f5 has 5 metrics, 2 tags - assert.Len(t, deviceCk.config.Metrics, 5) - assert.Len(t, deviceCk.config.MetricTags, 2) + assert.Len(t, deviceCk.profileCache.profile.Metrics, 5) + assert.Len(t, deviceCk.profileCache.profile.MetricTags, 2) sender.ResetCalls() @@ -810,8 +810,8 @@ profiles: sender.AssertMetricNotTaggedWith(t, "Gauge", "snmp.anotherMetric", []string{"some_tag:some_tag_value"}) // Check that we replaced the metrics, instead of just adding to them - assert.Len(t, deviceCk.config.Metrics, 2) - assert.Len(t, deviceCk.config.MetricTags, 2) + assert.Len(t, deviceCk.profileCache.profile.Metrics, 2) + assert.Len(t, deviceCk.profileCache.profile.MetricTags, 2) // Assert Ping Metrics sender.AssertMetric(t, "Gauge", pingReachableMetric, float64(1), "", snmpTags) @@ -926,8 +926,8 @@ profiles: 
sender.AssertMetricNotTaggedWith(t, "Gauge", "snmp.sysStatMemoryTotal", []string{"unknown_symbol:100"}) // f5 has 5 metrics, 2 tags - assert.Len(t, deviceCk.config.Metrics, 5) - assert.Len(t, deviceCk.config.MetricTags, 2) + assert.Len(t, deviceCk.profileCache.profile.Metrics, 5) + assert.Len(t, deviceCk.profileCache.profile.MetricTags, 2) sender.ResetCalls() @@ -957,8 +957,8 @@ profiles: sender.AssertMetricNotTaggedWith(t, "Gauge", "snmp.anotherMetric", []string{"some_tag:some_tag_value"}) // Check that we replaced the metrics, instead of just adding to them - assert.Len(t, deviceCk.config.Metrics, 2) - assert.Len(t, deviceCk.config.MetricTags, 2) + assert.Len(t, deviceCk.profileCache.profile.Metrics, 2) + assert.Len(t, deviceCk.profileCache.profile.MetricTags, 2) // Assert Ping reachability metrics are sent sender.AssertMetric(t, "Gauge", pingReachableMetric, float64(0), "", snmpTags) diff --git a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go index 4ca8ccc302bf28..c4aa83a01a663a 100644 --- a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go +++ b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go @@ -7,6 +7,7 @@ package discovery import ( "fmt" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/profile" "net" "testing" "time" @@ -57,6 +58,7 @@ func TestDiscovery(t *testing.T) { DiscoveryInterval: 3600, DiscoveryWorkers: 1, IgnoredIPAddresses: map[string]bool{"192.168.0.5": true}, + ProfileProvider: profile.StaticProvider(nil), } discovery := NewDiscovery(checkConfig, sessionFactory, config) discovery.Start() @@ -107,6 +109,7 @@ func TestDiscoveryCache(t *testing.T) { CommunityString: "public", DiscoveryInterval: 3600, DiscoveryWorkers: 1, + ProfileProvider: profile.StaticProvider(nil), } discovery := NewDiscovery(checkConfig, sessionFactory, config) discovery.Start() @@ -139,6 +142,7 @@ func TestDiscoveryCache(t 
*testing.T) { CommunityString: "public", DiscoveryInterval: 3600, DiscoveryWorkers: 0, // no workers, the devices will be loaded from cache + ProfileProvider: profile.StaticProvider(nil), } discovery2 := NewDiscovery(checkConfig, sessionFactory, config) discovery2.Start() @@ -181,6 +185,7 @@ func TestDiscoveryTicker(t *testing.T) { CommunityString: "public", DiscoveryInterval: 1, DiscoveryWorkers: 1, + ProfileProvider: profile.StaticProvider(nil), } discovery := NewDiscovery(checkConfig, sessionFactory, config) discovery.Start() @@ -200,6 +205,7 @@ func TestDiscovery_checkDevice(t *testing.T) { CommunityString: "public", DiscoveryInterval: 1, DiscoveryWorkers: 1, + ProfileProvider: profile.StaticProvider(nil), } ipAddr, ipNet, err := net.ParseCIDR(checkConfig.Network) assert.Nil(t, err) @@ -319,6 +325,7 @@ func TestDiscovery_createDevice(t *testing.T) { DiscoveryWorkers: 1, DiscoveryAllowedFailures: 3, Namespace: "default", + ProfileProvider: profile.StaticProvider(nil), } discovery := NewDiscovery(checkConfig, session.NewMockSession, config) ipAddr, ipNet, err := net.ParseCIDR(checkConfig.Network) diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch.go index 54987adef0ed25..b55d5cbf602d53 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch.go @@ -12,7 +12,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/session" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/valuestore" ) @@ -36,19 +35,21 @@ func (c columnFetchStrategy) String() string { } // Fetch oid values from device -// TODO: pass only specific configs instead of the whole CheckConfig -func Fetch(sess session.Session, config *checkconfig.CheckConfig) 
(*valuestore.ResultValueStore, error) { +func Fetch(sess session.Session, scalarOIDs, columnOIDs []string, batchSize int, + bulkMaxRepetitions uint32) (*valuestore.ResultValueStore, error) { // fetch scalar values - scalarResults, err := fetchScalarOidsWithBatching(sess, config.OidConfig.ScalarOids, config.OidBatchSize) + scalarResults, err := fetchScalarOidsWithBatching(sess, scalarOIDs, batchSize) if err != nil { return nil, fmt.Errorf("failed to fetch scalar oids with batching: %v", err) } - columnResults, err := fetchColumnOidsWithBatching(sess, config.OidConfig.ColumnOids, config.OidBatchSize, config.BulkMaxRepetitions, useGetBulk) + columnResults, err := fetchColumnOidsWithBatching(sess, columnOIDs, batchSize, + bulkMaxRepetitions, useGetBulk) if err != nil { log.Debugf("failed to fetch oids with GetBulk batching: %v", err) - columnResults, err = fetchColumnOidsWithBatching(sess, config.OidConfig.ColumnOids, config.OidBatchSize, config.BulkMaxRepetitions, useGetNext) + columnResults, err = fetchColumnOidsWithBatching(sess, columnOIDs, batchSize, bulkMaxRepetitions, + useGetNext) if err != nil { return nil, fmt.Errorf("failed to fetch oids with GetNext batching: %v", err) } diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go index 63c2c4ce937f37..e219144b768b8e 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go @@ -370,14 +370,9 @@ func Test_fetchColumnOidsBatch_usingGetBulkAndGetNextFallback(t *testing.T) { sess.On("GetNext", []string{"1.1.3"}).Return(&secondBatchPacket1, nil) sess.On("GetNext", []string{"1.1.3.1"}).Return(&secondBatchPacket2, nil) - config := &checkconfig.CheckConfig{ - BulkMaxRepetitions: checkconfig.DefaultBulkMaxRepetitions, - OidBatchSize: 2, - OidConfig: checkconfig.OidConfig{ - ColumnOids: []string{"1.1.1", "1.1.2", "1.1.3"}, - }, - } - columnValues, err := Fetch(sess, 
config) + columnOIDs := []string{"1.1.1", "1.1.2", "1.1.3"} + + columnValues, err := Fetch(sess, nil, columnOIDs, 2, checkconfig.DefaultBulkMaxRepetitions) assert.Nil(t, err) expectedColumnValues := &valuestore.ResultValueStore{ @@ -719,41 +714,32 @@ func Test_fetchScalarOids_v1NoSuchName_errorIndexTooLow(t *testing.T) { func Test_fetchValues_errors(t *testing.T) { tests := []struct { name string - config checkconfig.CheckConfig + maxReps uint32 + batchSize int + ScalarOIDs []string + ColumnOIDs []string bulkPacket gosnmp.SnmpPacket expectedError error }{ { - name: "invalid batch size", - config: checkconfig.CheckConfig{ - BulkMaxRepetitions: checkconfig.DefaultBulkMaxRepetitions, - OidConfig: checkconfig.OidConfig{ - ScalarOids: []string{"1.1", "1.2"}, - }, - }, + name: "invalid batch size", + maxReps: checkconfig.DefaultBulkMaxRepetitions, + ScalarOIDs: []string{"1.1", "1.2"}, expectedError: fmt.Errorf("failed to fetch scalar oids with batching: failed to create oid batches: batch size must be positive. 
invalid size: 0"), }, { - name: "get fetch error", - config: checkconfig.CheckConfig{ - BulkMaxRepetitions: checkconfig.DefaultBulkMaxRepetitions, - OidBatchSize: 10, - OidConfig: checkconfig.OidConfig{ - ScalarOids: []string{"1.1", "2.2"}, - }, - }, + name: "get fetch error", + maxReps: checkconfig.DefaultBulkMaxRepetitions, + batchSize: 10, + ScalarOIDs: []string{"1.1", "2.2"}, expectedError: fmt.Errorf("failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: error getting oids `[1.1 2.2]`: get error"), }, { - name: "bulk fetch error", - config: checkconfig.CheckConfig{ - BulkMaxRepetitions: checkconfig.DefaultBulkMaxRepetitions, - OidBatchSize: 10, - OidConfig: checkconfig.OidConfig{ - ScalarOids: []string{}, - ColumnOids: []string{"1.1", "2.2"}, - }, - }, + name: "bulk fetch error", + maxReps: checkconfig.DefaultBulkMaxRepetitions, + batchSize: 10, + ScalarOIDs: []string{}, + ColumnOIDs: []string{"1.1", "2.2"}, expectedError: fmt.Errorf("failed to fetch oids with GetNext batching: failed to fetch column oids: fetch column: failed getting oids `[1.1 2.2]` using GetNext: getnext error"), }, } @@ -764,7 +750,7 @@ func Test_fetchValues_errors(t *testing.T) { sess.On("GetBulk", []string{"1.1", "2.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) sess.On("GetNext", []string{"1.1", "2.2"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("getnext error")) - _, err := Fetch(sess, &tt.config) + _, err := Fetch(sess, tt.ScalarOIDs, tt.ColumnOIDs, tt.batchSize, tt.maxReps) assert.Equal(t, tt.expectedError, err) }) diff --git a/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go b/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go index 9bc800985b18b6..aa18994fd3fd3e 100644 --- a/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go +++ b/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go @@ -51,13 +51,13 @@ var 
supportedDeviceTypes = map[string]bool{ } // ReportNetworkDeviceMetadata reports device metadata -func (ms *MetricSender) ReportNetworkDeviceMetadata(config *checkconfig.CheckConfig, store *valuestore.ResultValueStore, origTags []string, collectTime time.Time, deviceStatus devicemetadata.DeviceStatus, pingStatus devicemetadata.DeviceStatus, diagnoses []devicemetadata.DiagnosisMetadata) { +func (ms *MetricSender) ReportNetworkDeviceMetadata(config *checkconfig.CheckConfig, profile profiledefinition.ProfileDefinition, store *valuestore.ResultValueStore, origTags []string, collectTime time.Time, deviceStatus devicemetadata.DeviceStatus, pingStatus devicemetadata.DeviceStatus, diagnoses []devicemetadata.DiagnosisMetadata) { tags := utils.CopyStrings(origTags) tags = sortutil.UniqInPlace(tags) - metadataStore := buildMetadataStore(config.Metadata, store) + metadataStore := buildMetadataStore(profile.Metadata, store) - devices := []devicemetadata.DeviceMetadata{buildNetworkDeviceMetadata(config.DeviceID, config.DeviceIDTags, config, metadataStore, tags, deviceStatus, pingStatus)} + devices := []devicemetadata.DeviceMetadata{buildNetworkDeviceMetadata(config.DeviceID, config.DeviceIDTags, config, profile, metadataStore, tags, deviceStatus, pingStatus)} interfaces := buildNetworkInterfacesMetadata(config.DeviceID, metadataStore) ipAddresses := buildNetworkIPAddressesMetadata(config.DeviceID, metadataStore) @@ -191,8 +191,9 @@ func buildMetadataStore(metadataConfigs profiledefinition.MetadataConfig, values return metadataStore } -func buildNetworkDeviceMetadata(deviceID string, idTags []string, config *checkconfig.CheckConfig, store *metadata.Store, tags []string, deviceStatus devicemetadata.DeviceStatus, pingStatus devicemetadata.DeviceStatus) devicemetadata.DeviceMetadata { - var vendor, sysName, sysDescr, sysObjectID, location, serialNumber, version, productName, model, osName, osVersion, osHostname, deviceType string +func buildNetworkDeviceMetadata(deviceID string, 
idTags []string, config *checkconfig.CheckConfig, profile profiledefinition.ProfileDefinition, store *metadata.Store, tags []string, deviceStatus devicemetadata.DeviceStatus, pingStatus devicemetadata.DeviceStatus) devicemetadata.DeviceMetadata { + var vendor, sysName, sysDescr, sysObjectID, location, serialNumber, version, productName, model, osName, osVersion, osHostname, deviceType, profileName string + var profileVersion uint64 if store != nil { sysName = store.GetScalarAsString("device.name") sysDescr = store.GetScalarAsString("device.description") @@ -209,10 +210,10 @@ func buildNetworkDeviceMetadata(deviceID string, idTags []string, config *checkc deviceType = getDeviceType(store) } - // fallback to Device.Vendor for backward compatibility - profileDef := config.GetProfileDef() - if profileDef != nil && vendor == "" { - vendor = profileDef.Device.Vendor + profileName = profile.Name + profileVersion = profile.Version + if vendor == "" { + vendor = profile.Device.Vendor } return devicemetadata.DeviceMetadata{ @@ -223,8 +224,8 @@ func buildNetworkDeviceMetadata(deviceID string, idTags []string, config *checkc IPAddress: config.IPAddress, SysObjectID: sysObjectID, Location: location, - Profile: config.ProfileName, - ProfileVersion: getProfileVersion(config), + Profile: profileName, + ProfileVersion: profileVersion, Vendor: vendor, Tags: tags, Subnet: config.ResolvedSubnetName, @@ -242,15 +243,6 @@ func buildNetworkDeviceMetadata(deviceID string, idTags []string, config *checkc } } -func getProfileVersion(config *checkconfig.CheckConfig) uint64 { - var profileVersion uint64 - profileDef := config.GetProfileDef() - if profileDef != nil { - profileVersion = profileDef.Version - } - return profileVersion -} - func getDeviceType(store *metadata.Store) string { deviceType := strings.ToLower(store.GetScalarAsString("device.type")) if deviceType == "" { diff --git a/pkg/collector/corechecks/snmp/internal/report/report_device_metadata_test.go 
b/pkg/collector/corechecks/snmp/internal/report/report_device_metadata_test.go index 22d58e9530216f..642cace14d0dd0 100644 --- a/pkg/collector/corechecks/snmp/internal/report/report_device_metadata_test.go +++ b/pkg/collector/corechecks/snmp/internal/report/report_device_metadata_test.go @@ -9,6 +9,8 @@ import ( "bufio" "bytes" "encoding/json" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/profile" + "github.com/stretchr/testify/require" "testing" "time" @@ -23,7 +25,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/profile" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/valuestore" ) @@ -67,72 +68,74 @@ func Test_metricSender_reportNetworkDeviceMetadata_withoutInterfaces(t *testing. Definition: profiledefinition.ProfileDefinition{ Name: "my-profile", Version: 10, - }, - }, - }), - Metadata: profiledefinition.MetadataConfig{ - "device": { - Fields: map[string]profiledefinition.MetadataField{ - "name": { - // Should use value from Symbol `1.3.6.1.2.1.1.5.0` - Symbol: profiledefinition.SymbolConfig{ - OID: "1.3.6.1.2.1.1.5.0", - Name: "sysName", - }, - Symbols: []profiledefinition.SymbolConfig{ - { - OID: "1.2.99", - Name: "doesNotExist", + Metadata: profiledefinition.MetadataConfig{ + "device": { + Fields: map[string]profiledefinition.MetadataField{ + "name": { + // Should use value from Symbol `1.3.6.1.2.1.1.5.0` + Symbol: profiledefinition.SymbolConfig{ + OID: "1.3.6.1.2.1.1.5.0", + Name: "sysName", + }, + Symbols: []profiledefinition.SymbolConfig{ + { + OID: "1.2.99", + Name: "doesNotExist", + }, + }, + }, + "description": { + // Should use value from first element in Symbols `1.3.6.1.2.1.1.1.0` + Symbol: profiledefinition.SymbolConfig{ + OID: "1.9999", + Name: "doesNotExist", + }, + Symbols: []profiledefinition.SymbolConfig{ + 
{ + OID: "1.3.6.1.2.1.1.1.0", + Name: "sysDescr", + }, + }, + }, + "location": { + // Should use value from first element in Symbols `1.3.6.1.2.1.1.1.0` + Symbol: profiledefinition.SymbolConfig{ + OID: "1.9999", + Name: "doesNotExist", + }, + Symbols: []profiledefinition.SymbolConfig{ + { + OID: "1.888", + Name: "doesNotExist2", + }, + { + OID: "1.3.6.1.2.1.1.6.0", + Name: "sysLocation", + }, + { + OID: "1.7777", + Name: "doesNotExist2", + }, + }, + }, + "type": { + Value: "router", + }, }, }, }, - "description": { - // Should use value from first element in Symbols `1.3.6.1.2.1.1.1.0` - Symbol: profiledefinition.SymbolConfig{ - OID: "1.9999", - Name: "doesNotExist", - }, - Symbols: []profiledefinition.SymbolConfig{ - { - OID: "1.3.6.1.2.1.1.1.0", - Name: "sysDescr", - }, - }, - }, - "location": { - // Should use value from first element in Symbols `1.3.6.1.2.1.1.1.0` - Symbol: profiledefinition.SymbolConfig{ - OID: "1.9999", - Name: "doesNotExist", - }, - Symbols: []profiledefinition.SymbolConfig{ - { - OID: "1.888", - Name: "doesNotExist2", - }, - { - OID: "1.3.6.1.2.1.1.6.0", - Name: "sysLocation", - }, - { - OID: "1.7777", - Name: "doesNotExist2", - }, - }, - }, - "type": { - Value: "router", - }, }, }, - }, + }), } layout := "2006-01-02 15:04:05" str := "2014-11-12 11:45:26" collectTime, err := time.Parse(layout, str) - assert.NoError(t, err) + require.NoError(t, err) + profile, err := config.BuildProfile("") + require.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, storeWithoutIfName, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusReachable, nil) + ms.ReportNetworkDeviceMetadata(config, profile, storeWithoutIfName, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusReachable, nil) // language=json event := []byte(` @@ -207,14 +210,17 @@ profiles: `) config, err := checkconfig.NewCheckConfig(rawInstanceConfig, rawInitConfig) - assert.Nil(t, err) + require.Nil(t, err) 
layout := "2006-01-02 15:04:05" str := "2014-11-12 11:45:26" collectTime, err := time.Parse(layout, str) - assert.NoError(t, err) + require.NoError(t, err) + profile, err := config.BuildProfile("") + require.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, storeWithoutIfName, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusReachable, nil) + ms.ReportNetworkDeviceMetadata(config, profile, storeWithoutIfName, []string{"tag1", "tag2"}, collectTime, + metadata.DeviceStatusReachable, metadata.DeviceStatusReachable, nil) // language=json event := []byte(` @@ -296,6 +302,8 @@ func Test_metricSender_reportNetworkDeviceMetadata_withDeviceInterfacesAndDiagno DeviceIDTags: []string{"device_name:127.0.0.1"}, ResolvedSubnetName: "127.0.0.0/29", Namespace: "my-ns", + } + profile := profiledefinition.ProfileDefinition{ Metadata: profiledefinition.MetadataConfig{ "device": { Fields: map[string]profiledefinition.MetadataField{ @@ -354,7 +362,7 @@ func Test_metricSender_reportNetworkDeviceMetadata_withDeviceInterfacesAndDiagno str := "2014-11-12 11:45:26" collectTime, err := time.Parse(layout, str) assert.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, storeWithIfName, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, diagnosis) + ms.ReportNetworkDeviceMetadata(config, profile, storeWithIfName, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, diagnosis) ifTags1 := []string{"tag1", "tag2", "status:down", "interface:21", "interface_alias:ifAlias1", "interface_index:1", "oper_status:up", "admin_status:down"} ifTags2 := []string{"tag1", "tag2", "status:off", "interface:22", "interface_index:2", "oper_status:down", "admin_status:down", "muted", "someKey:someValue"} @@ -447,6 +455,8 @@ func Test_metricSender_reportNetworkDeviceMetadata_fallbackOnFieldValue(t *testi DeviceIDTags: []string{"device_name:127.0.0.1"}, 
ResolvedSubnetName: "127.0.0.0/29", Namespace: "my-ns", + } + profile := profiledefinition.ProfileDefinition{ Metadata: profiledefinition.MetadataConfig{ "device": { Fields: map[string]profiledefinition.MetadataField{ @@ -469,7 +479,7 @@ func Test_metricSender_reportNetworkDeviceMetadata_fallbackOnFieldValue(t *testi collectTime, err := time.Parse(layout, str) assert.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, nil) + ms.ReportNetworkDeviceMetadata(config, profile, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, nil) // language=json event := []byte(` @@ -522,6 +532,8 @@ func Test_metricSender_reportNetworkDeviceMetadata_pingCanConnect_Nil(t *testing DeviceIDTags: []string{"device_name:127.0.0.1"}, ResolvedSubnetName: "127.0.0.0/29", Namespace: "my-ns", + } + profile := profiledefinition.ProfileDefinition{ Metadata: profiledefinition.MetadataConfig{ "device": { Fields: map[string]profiledefinition.MetadataField{ @@ -541,7 +553,7 @@ func Test_metricSender_reportNetworkDeviceMetadata_pingCanConnect_Nil(t *testing collectTime, err := time.Parse(layout, str) assert.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, 0, nil) + ms.ReportNetworkDeviceMetadata(config, profile, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, 0, nil) // language=json event := []byte(` @@ -593,6 +605,8 @@ func Test_metricSender_reportNetworkDeviceMetadata_pingCanConnect_True(t *testin DeviceIDTags: []string{"device_name:127.0.0.1"}, ResolvedSubnetName: "127.0.0.0/29", Namespace: "my-ns", + } + profile := profiledefinition.ProfileDefinition{ Metadata: profiledefinition.MetadataConfig{ "device": { Fields: map[string]profiledefinition.MetadataField{ 
@@ -612,7 +626,7 @@ func Test_metricSender_reportNetworkDeviceMetadata_pingCanConnect_True(t *testin collectTime, err := time.Parse(layout, str) assert.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, nil) + ms.ReportNetworkDeviceMetadata(config, profile, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, nil) // language=json event := []byte(` @@ -665,6 +679,8 @@ func Test_metricSender_reportNetworkDeviceMetadata_pingCanConnect_False(t *testi DeviceIDTags: []string{"device_name:127.0.0.1"}, ResolvedSubnetName: "127.0.0.0/29", Namespace: "my-ns", + } + profile := profiledefinition.ProfileDefinition{ Metadata: profiledefinition.MetadataConfig{ "device": { Fields: map[string]profiledefinition.MetadataField{ @@ -684,7 +700,7 @@ func Test_metricSender_reportNetworkDeviceMetadata_pingCanConnect_False(t *testi collectTime, err := time.Parse(layout, str) assert.NoError(t, err) - ms.ReportNetworkDeviceMetadata(config, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, nil) + ms.ReportNetworkDeviceMetadata(config, profile, emptyMetadataStore, []string{"tag1", "tag2"}, collectTime, metadata.DeviceStatusReachable, metadata.DeviceStatusUnreachable, nil) // language=json event := []byte(` @@ -964,37 +980,3 @@ func Test_buildInterfaceIndexByIDType(t *testing.T) { } assert.Equal(t, expectedInterfaceIndexByIDType, interfaceIndexByIDType) } - -func Test_getProfileVersion(t *testing.T) { - tests := []struct { - name string - config checkconfig.CheckConfig - expectedProfileVersion uint64 - }{ - { - name: "profile definition is present", - config: checkconfig.CheckConfig{ - ProfileName: "my-profile", - ProfileProvider: profile.StaticProvider(profile.ProfileConfigMap{ - "my-profile": profile.ProfileConfig{ - 
Definition: profiledefinition.ProfileDefinition{ - Name: "my-profile", - Version: 42, - }, - }, - }), - }, - expectedProfileVersion: 42, - }, - { - name: "profile definition not present", - config: checkconfig.CheckConfig{}, - expectedProfileVersion: 0, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expectedProfileVersion, getProfileVersion(&tt.config)) - }) - } -} diff --git a/pkg/networkdevice/profile/profiledefinition/profile_definition.go b/pkg/networkdevice/profile/profiledefinition/profile_definition.go index 065c0a83b4d46b..a87704ec84ce76 100644 --- a/pkg/networkdevice/profile/profiledefinition/profile_definition.go +++ b/pkg/networkdevice/profile/profiledefinition/profile_definition.go @@ -35,19 +35,6 @@ type ProfileDefinition struct { Version uint64 `yaml:"version,omitempty" json:"version"` } -// GetVendor returns the static vendor for this profile, if one is set -func (p *ProfileDefinition) GetVendor() string { - device, ok := p.Metadata["device"] - if !ok { - return "" - } - vendor, ok := device.Fields["vendor"] - if !ok { - return "" - } - return vendor.Value -} - // DeviceProfileRcConfig represent the profile stored in remote config. 
type DeviceProfileRcConfig struct { Profile ProfileDefinition `json:"profile_definition"` From bab631c10d61a3822d50d367b5ab2ac8062974cc Mon Sep 17 00:00:00 2001 From: Baptiste Foy Date: Wed, 29 Jan 2025 19:28:27 +0100 Subject: [PATCH 54/97] fix(fleet): Mark TestInstallParity as flaky (#33550) --- test/new-e2e/tests/installer/script/default_script_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/new-e2e/tests/installer/script/default_script_test.go b/test/new-e2e/tests/installer/script/default_script_test.go index 8ba691bfdaa928..a28d3516d67b40 100644 --- a/test/new-e2e/tests/installer/script/default_script_test.go +++ b/test/new-e2e/tests/installer/script/default_script_test.go @@ -10,6 +10,7 @@ import ( "os" "strings" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" e2eos "github.com/DataDog/test-infra-definitions/components/os" @@ -85,6 +86,8 @@ func (s *installScriptDefaultSuite) TestInstallParity() { s.T().Skip("Skipping test due to missing E2E_PIPELINE_ID variable") } + flake.Mark(s.T()) // TODO: Fixme once installer 0.10.0 is released + defer s.Purge() // Full supported option set From 532ccb064b2b27dd57ca88a479f3f8b0005ee5f1 Mon Sep 17 00:00:00 2001 From: Dan Lepage <140522866+dplepage-dd@users.noreply.github.com> Date: Wed, 29 Jan 2025 14:20:24 -0500 Subject: [PATCH 55/97] [NDM] Fetch profiles instead of names. 
(#32374) --- .../snmp/internal/profile/config_profile.go | 28 ++-- .../snmp/internal/profile/profile.go | 30 ++-- .../snmp/internal/profile/profile_test.go | 150 +++++++++--------- 3 files changed, 111 insertions(+), 97 deletions(-) diff --git a/pkg/collector/corechecks/snmp/internal/profile/config_profile.go b/pkg/collector/corechecks/snmp/internal/profile/config_profile.go index 8e74b43c7b7e40..7b1b70fd4be166 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/config_profile.go +++ b/pkg/collector/corechecks/snmp/internal/profile/config_profile.go @@ -16,8 +16,6 @@ type Provider interface { HasProfile(profileName string) bool // GetProfile returns the profile with this name, or nil if there isn't one. GetProfile(profileName string) *ProfileConfig - // GetProfileNameForSysObjectID returns the name of the best matching profile for this sysObjectID, or "" if there isn't one. - GetProfileNameForSysObjectID(sysObjectID string) (string, error) // GetProfileForSysObjectID returns the best matching profile for this sysObjectID, or nil if there isn't one. GetProfileForSysObjectID(sysObjectID string) (*ProfileConfig, error) // LastUpdated returns when this Provider last changed @@ -42,16 +40,8 @@ func (s *staticProvider) HasProfile(profileName string) bool { return ok } -func (s *staticProvider) GetProfileNameForSysObjectID(sysObjectID string) (string, error) { - return getProfileForSysObjectID(s.configMap, sysObjectID) -} - func (s *staticProvider) GetProfileForSysObjectID(sysObjectID string) (*ProfileConfig, error) { - name, err := getProfileForSysObjectID(s.configMap, sysObjectID) - if err != nil { - return nil, err - } - return s.GetProfile(name), nil + return getProfileForSysObjectID(s.configMap, sysObjectID) } func (s *staticProvider) LastUpdated() time.Time { @@ -69,6 +59,22 @@ func StaticProvider(profiles ProfileConfigMap) Provider { // ProfileConfigMap is a set of ProfileConfig instances each identified by name. 
type ProfileConfigMap map[string]ProfileConfig +// withNames assigns the key names to Definition.Name for every profile. This is for testing. +func (pcm ProfileConfigMap) withNames() ProfileConfigMap { + for name, profile := range pcm { + if profile.Definition.Name == "" { + def := profile.Definition + def.Name = name + pcm[name] = ProfileConfig{ + DefinitionFile: profile.DefinitionFile, + Definition: def, + IsUserProfile: profile.IsUserProfile, + } + } + } + return pcm +} + // ProfileConfig represents a profile configuration. type ProfileConfig struct { DefinitionFile string `yaml:"definition_file"` diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile.go b/pkg/collector/corechecks/snmp/internal/profile/profile.go index d6e3d1219410f6..1ec74106e7de51 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile.go @@ -48,38 +48,40 @@ func loadProfiles(initConfigProfiles ProfileConfigMap) (ProfileConfigMap, error) } // getProfileForSysObjectID return a profile for a sys object id -func getProfileForSysObjectID(profiles ProfileConfigMap, sysObjectID string) (string, error) { - tmpSysOidToProfile := map[string]string{} - var matchedOids []string +func getProfileForSysObjectID(profiles ProfileConfigMap, sysObjectID string) (*ProfileConfig, error) { + tmpSysOidToProfile := map[string]*ProfileConfig{} + var matchedOIDs []string - for profile, profConfig := range profiles { + for profileName, profConfig := range profiles { for _, oidPattern := range profConfig.Definition.SysObjectIDs { found, err := filepath.Match(oidPattern, sysObjectID) if err != nil { - log.Debugf("pattern error in profile %q: %v", profile, err) + log.Debugf("pattern error in profile %q: %v", profileName, err) continue } if !found { continue } if prevMatchedProfile, ok := tmpSysOidToProfile[oidPattern]; ok { - if profiles[prevMatchedProfile].IsUserProfile && !profConfig.IsUserProfile { + if 
profiles[prevMatchedProfile.Definition.Name].IsUserProfile && !profConfig.IsUserProfile { continue } - if profiles[prevMatchedProfile].IsUserProfile == profConfig.IsUserProfile { - return "", fmt.Errorf("profile %q has the same sysObjectID (%s) as %q", profile, oidPattern, prevMatchedProfile) + if profiles[prevMatchedProfile.Definition.Name].IsUserProfile == profConfig.IsUserProfile { + return nil, fmt.Errorf("profile %q has the same sysObjectID (%s) as %q", profileName, oidPattern, + prevMatchedProfile.Definition.Name) } } - tmpSysOidToProfile[oidPattern] = profile - matchedOids = append(matchedOids, oidPattern) + tmpSysOidToProfile[oidPattern] = &profConfig + matchedOIDs = append(matchedOIDs, oidPattern) } } - if len(matchedOids) == 0 { - return "", fmt.Errorf("no profiles found for sysObjectID %q", sysObjectID) + if len(matchedOIDs) == 0 { + return nil, fmt.Errorf("no profiles found for sysObjectID %q", sysObjectID) } - oid, err := getMostSpecificOid(matchedOids) + oid, err := getMostSpecificOid(matchedOIDs) if err != nil { - return "", fmt.Errorf("failed to get most specific profile for sysObjectID %q, for matched oids %v: %w", sysObjectID, matchedOids, err) + return nil, fmt.Errorf("failed to get most specific profile for sysObjectID %q, for matched oids %v: %w", + sysObjectID, matchedOIDs, err) } return tmpSysOidToProfile[oid], nil } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go index 07fff6ec06c995..f2c402e8864274 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go @@ -6,6 +6,7 @@ package profile import ( + "github.com/stretchr/testify/require" "path/filepath" "sort" "testing" @@ -146,7 +147,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.5.*"}, }, }, - } + }.withNames() 
mockProfilesWithPatternError := ProfileConfigMap{ "profile1": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -156,7 +157,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.***.*"}, }, }, - } + }.withNames() mockProfilesWithInvalidPatternError := ProfileConfigMap{ "profile1": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -166,7 +167,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.[.*"}, }, }, - } + }.withNames() mockProfilesWithDefaultDuplicateSysobjectid := ProfileConfigMap{ "profile1": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -192,7 +193,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.4"}, }, }, - } + }.withNames() mockProfilesWithUserProfilePrecedenceWithUserProfileFirstInList := ProfileConfigMap{ "user-profile": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -211,7 +212,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"}, }, }, - } + }.withNames() mockProfilesWithUserProfilePrecedenceWithDefaultProfileFirstInList := ProfileConfigMap{ "default-profile": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -230,7 +231,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { }, IsUserProfile: true, }, - } + }.withNames() mockProfilesWithUserProfileMatchAllAndMorePreciseDefaultProfile := ProfileConfigMap{ "default-profile": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -249,7 +250,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { }, IsUserProfile: true, }, - } + }.withNames() mockProfilesWithUserDuplicateSysobjectid := ProfileConfigMap{ "profile1": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -269,101 +270,106 @@ func 
Test_getProfileForSysObjectID(t *testing.T) { }, IsUserProfile: true, }, - } + }.withNames() tests := []struct { - name string - profiles ProfileConfigMap - sysObjectID string - expectedProfile string - expectedError string + name string + profiles ProfileConfigMap + sysObjectID string + expectedProfileName string + expectedError string }{ { - name: "found matching profile", - profiles: mockProfiles, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.1", - expectedProfile: "profile1", - expectedError: "", + name: "found matching profile", + profiles: mockProfiles, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.1", + expectedProfileName: "profile1", + expectedError: "", }, { - name: "found more precise matching profile", - profiles: mockProfiles, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.10", - expectedProfile: "profile2", - expectedError: "", + name: "found more precise matching profile", + profiles: mockProfiles, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.10", + expectedProfileName: "profile2", + expectedError: "", }, { - name: "found even more precise matching profile", - profiles: mockProfiles, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", - expectedProfile: "profile3", - expectedError: "", + name: "found even more precise matching profile", + profiles: mockProfiles, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", + expectedProfileName: "profile3", + expectedError: "", }, { - name: "user profile have precedence with user first in list", - profiles: mockProfilesWithUserProfilePrecedenceWithUserProfileFirstInList, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3", - expectedProfile: "user-profile", - expectedError: "", + name: "user profile have precedence with user first in list", + profiles: mockProfilesWithUserProfilePrecedenceWithUserProfileFirstInList, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3", + expectedProfileName: "user-profile", + expectedError: "", }, { - name: "user profile have precedence with default first in list", - profiles: 
mockProfilesWithUserProfilePrecedenceWithDefaultProfileFirstInList, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3", - expectedProfile: "user-profile", - expectedError: "", + name: "user profile have precedence with default first in list", + profiles: mockProfilesWithUserProfilePrecedenceWithDefaultProfileFirstInList, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3", + expectedProfileName: "user-profile", + expectedError: "", }, { - name: "user profile with less specific sysobjectid does not have precedence over a default profiel with more precise sysobjectid", - profiles: mockProfilesWithUserProfileMatchAllAndMorePreciseDefaultProfile, - sysObjectID: "1.3.999", - expectedProfile: "default-profile", - expectedError: "", + name: "user profile with less specific sysobjectid does not have precedence over a default profiel with more precise sysobjectid", + profiles: mockProfilesWithUserProfileMatchAllAndMorePreciseDefaultProfile, + sysObjectID: "1.3.999", + expectedProfileName: "default-profile", + expectedError: "", }, { - name: "failed to get most specific profile for sysObjectID", - profiles: mockProfilesWithPatternError, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", - expectedProfile: "", - expectedError: "failed to get most specific profile for sysObjectID \"1.3.6.1.4.1.3375.2.1.3.4.5.11\", for matched oids [1.3.6.1.4.1.3375.2.1.3.***.*]: error parsing part `***` for pattern `1.3.6.1.4.1.3375.2.1.3.***.*`: strconv.Atoi: parsing \"***\": invalid syntax", + name: "failed to get most specific profile for sysObjectID", + profiles: mockProfilesWithPatternError, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", + expectedProfileName: "", + expectedError: "failed to get most specific profile for sysObjectID \"1.3.6.1.4.1.3375.2.1.3.4.5.11\", for matched oids [1.3.6.1.4.1.3375.2.1.3.***.*]: error parsing part `***` for pattern `1.3.6.1.4.1.3375.2.1.3.***.*`: strconv.Atoi: parsing \"***\": invalid syntax", }, { - name: "invalid pattern", // profiles with invalid patterns are skipped, 
leading to: cannot get most specific oid from empty list of oids - profiles: mockProfilesWithInvalidPatternError, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", - expectedProfile: "", - expectedError: "no profiles found for sysObjectID \"1.3.6.1.4.1.3375.2.1.3.4.5.11\"", + name: "invalid pattern", // profiles with invalid patterns are skipped, leading to: cannot get most specific oid from empty list of oids + profiles: mockProfilesWithInvalidPatternError, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", + expectedProfileName: "", + expectedError: "no profiles found for sysObjectID \"1.3.6.1.4.1.3375.2.1.3.4.5.11\"", }, { - name: "duplicate sysobjectid", - profiles: mockProfilesWithDefaultDuplicateSysobjectid, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3", - expectedProfile: "", - expectedError: "has the same sysObjectID (1.3.6.1.4.1.3375.2.1.3) as", + name: "duplicate sysobjectid", + profiles: mockProfilesWithDefaultDuplicateSysobjectid, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3", + expectedProfileName: "", + expectedError: "has the same sysObjectID (1.3.6.1.4.1.3375.2.1.3) as", }, { - name: "unrelated duplicate sysobjectid should not raise error", - profiles: mockProfilesWithDefaultDuplicateSysobjectid, - sysObjectID: "1.3.6.1.4.1.3375.2.1.4", - expectedProfile: "profile3", - expectedError: "", + name: "unrelated duplicate sysobjectid should not raise error", + profiles: mockProfilesWithDefaultDuplicateSysobjectid, + sysObjectID: "1.3.6.1.4.1.3375.2.1.4", + expectedProfileName: "profile3", + expectedError: "", }, { - name: "duplicate sysobjectid", - profiles: mockProfilesWithUserDuplicateSysobjectid, - sysObjectID: "1.3.6.1.4.1.3375.2.1.3", - expectedProfile: "", - expectedError: "has the same sysObjectID (1.3.6.1.4.1.3375.2.1.3) as", + name: "duplicate sysobjectid", + profiles: mockProfilesWithUserDuplicateSysobjectid, + sysObjectID: "1.3.6.1.4.1.3375.2.1.3", + expectedProfileName: "", + expectedError: "has the same sysObjectID (1.3.6.1.4.1.3375.2.1.3) as", }, } for _, tt 
:= range tests { t.Run(tt.name, func(t *testing.T) { profile, err := getProfileForSysObjectID(tt.profiles, tt.sysObjectID) - if tt.expectedError == "" { - assert.Nil(t, err) - } else { + if tt.expectedError != "" { + require.Error(t, err) assert.Contains(t, err.Error(), tt.expectedError) + } else { + require.NoError(t, err) + } + if tt.expectedProfileName == "" { + assert.Nil(t, profile) + } else { + assert.Equal(t, tt.expectedProfileName, profile.Definition.Name) } - assert.Equal(t, tt.expectedProfile, profile) }) } } From 62c66911a6b20320fac5b1ee12a809f420d0e383 Mon Sep 17 00:00:00 2001 From: Gabriel Dos Santos <91925154+gabedos@users.noreply.github.com> Date: Wed, 29 Jan 2025 14:22:22 -0500 Subject: [PATCH 56/97] fix: Move DCA changelogs (#33537) Co-authored-by: Ursula Chen <58821586+urseberry@users.noreply.github.com> --- .../notes/agent-sidecar-security-f60f67e2c493cbd0.yaml | 5 ++--- .../notes/kubeapiserver-gpu-tagging-f780b5a3893880f4.yaml | 4 ++-- .../notes/sidecar-killed-on-job-fe0a57ea49c2529f.yaml | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) rename releasenotes/notes/agent-sidecar-security-cbfd5ea9f72124d0.yaml => releasenotes-dca/notes/agent-sidecar-security-f60f67e2c493cbd0.yaml (71%) rename releasenotes/notes/kubeapiserver-gpu-tagging-e6202bc782982e5d.yaml => releasenotes-dca/notes/kubeapiserver-gpu-tagging-f780b5a3893880f4.yaml (74%) rename releasenotes/notes/sidecar-killed-on-job-5dd6537ed1098646.yaml => releasenotes-dca/notes/sidecar-killed-on-job-fe0a57ea49c2529f.yaml (86%) diff --git a/releasenotes/notes/agent-sidecar-security-cbfd5ea9f72124d0.yaml b/releasenotes-dca/notes/agent-sidecar-security-f60f67e2c493cbd0.yaml similarity index 71% rename from releasenotes/notes/agent-sidecar-security-cbfd5ea9f72124d0.yaml rename to releasenotes-dca/notes/agent-sidecar-security-f60f67e2c493cbd0.yaml index a1f2095a0d121c..ce055dd6d3f0ff 100644 --- a/releasenotes/notes/agent-sidecar-security-cbfd5ea9f72124d0.yaml +++ 
b/releasenotes-dca/notes/agent-sidecar-security-f60f67e2c493cbd0.yaml @@ -1,14 +1,13 @@ # Each section from every release note are combined when the -# CHANGELOG.rst is rendered. So the text needs to be worded so that +# CHANGELOG-DCA.rst is rendered. So the text needs to be worded so that # it does not depend on any information only available in another # section. This may mean repeating some details, but each section # must be readable independently of the other. # # Each section note must be formatted as reStructuredText. --- -upgrade: enhancements: - | The Datadog Cluster Agent admission controller agent sidecar injection now sets up Agent sidecars to run with securityContext of `readOnlyRootFilesystem:false` by default. - Advanced users can customize the securityContext via clusterAgent.admissionController.agentSidecarInjection.profiles. + Advanced users can customize the securityContext through clusterAgent.admissionController.agentSidecarInjection.profiles. \ No newline at end of file diff --git a/releasenotes/notes/kubeapiserver-gpu-tagging-e6202bc782982e5d.yaml b/releasenotes-dca/notes/kubeapiserver-gpu-tagging-f780b5a3893880f4.yaml similarity index 74% rename from releasenotes/notes/kubeapiserver-gpu-tagging-e6202bc782982e5d.yaml rename to releasenotes-dca/notes/kubeapiserver-gpu-tagging-f780b5a3893880f4.yaml index a09ed5f24515e7..453a01bd074c18 100644 --- a/releasenotes/notes/kubeapiserver-gpu-tagging-e6202bc782982e5d.yaml +++ b/releasenotes-dca/notes/kubeapiserver-gpu-tagging-f780b5a3893880f4.yaml @@ -1,5 +1,5 @@ # Each section from every release note are combined when the -# CHANGELOG.rst is rendered. So the text needs to be worded so that +# CHANGELOG-DCA.rst is rendered. So the text needs to be worded so that # it does not depend on any information only available in another # section. This may mean repeating some details, but each section # must be readable independently of the other. 
@@ -9,4 +9,4 @@ fixes: - | Include `gpu_vendor` pod tags on the Datadog Cluster Agent when - enabling datadog.clusterTagger.collectKubernetesTags. + enabling datadog.clusterTagger.collectKubernetesTags. \ No newline at end of file diff --git a/releasenotes/notes/sidecar-killed-on-job-5dd6537ed1098646.yaml b/releasenotes-dca/notes/sidecar-killed-on-job-fe0a57ea49c2529f.yaml similarity index 86% rename from releasenotes/notes/sidecar-killed-on-job-5dd6537ed1098646.yaml rename to releasenotes-dca/notes/sidecar-killed-on-job-fe0a57ea49c2529f.yaml index 893c7b24d3d9a7..ddff4ee64cc525 100644 --- a/releasenotes/notes/sidecar-killed-on-job-5dd6537ed1098646.yaml +++ b/releasenotes-dca/notes/sidecar-killed-on-job-fe0a57ea49c2529f.yaml @@ -1,5 +1,5 @@ # Each section from every release note are combined when the -# CHANGELOG.rst is rendered. So the text needs to be worded so that +# CHANGELOG-DCA.rst is rendered. So the text needs to be worded so that # it does not depend on any information only available in another # section. This may mean repeating some details, but each section # must be readable independently of the other. 
From c7c8c4551930150b04fd95d98e89536c11eb2ad2 Mon Sep 17 00:00:00 2001 From: Mark Spicer Date: Wed, 29 Jan 2025 14:31:22 -0500 Subject: [PATCH 57/97] chore(ssi): refactor instrumentation config (#33508) --- .../mutate/autoinstrumentation/config.go | 66 ++++++++++++++--- .../mutate/autoinstrumentation/config_test.go | 73 +++++++++++++++++++ .../testdata/both_enabled_and_disabled.yaml | 8 ++ .../testdata/disabled_namespaces.yaml | 11 +++ .../testdata/enabled_namespaces.yaml | 11 +++ 5 files changed, 158 insertions(+), 11 deletions(-) create mode 100644 pkg/clusteragent/admission/mutate/autoinstrumentation/config_test.go create mode 100644 pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/both_enabled_and_disabled.yaml create mode 100644 pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/disabled_namespaces.yaml create mode 100644 pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/enabled_namespaces.yaml diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go index 84fd2adb98f64a..9a2b6fa549784e 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go @@ -19,6 +19,50 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) +// InstrumentationConfig is a struct to store the configuration for the autoinstrumentation logic. It can be populated +// using the datadog config through NewInstrumentationConfig. +type InstrumentationConfig struct { + // Enabled is a flag to enable the auto instrumentation. If false, the auto instrumentation is disabled with the + // caveat of the annotation based instrumentation. Full config + // key: apm_config.instrumentation.enabled + Enabled bool `mapstructure:"enabled"` + // EnabledNamespaces is a list of namespaces where the autoinstrumentation is enabled. If empty, it is enabled in + // all namespaces. 
EnabledNamespace and DisabledNamespaces are mutually exclusive and cannot be set together. Full + // config key: apm_config.instrumentation.enabled_namespaces + EnabledNamespaces []string `mapstructure:"enabled_namespaces"` + // DisabledNamespaces is a list of namespaces where the autoinstrumentation is disabled. If empty, it is enabled in + // all namespaces. EnabledNamespace and DisabledNamespaces are mutually exclusive and cannot be set together. Full + // config key: apm_config.instrumentation.disabled_namespaces + DisabledNamespaces []string `mapstructure:"disabled_namespaces"` + // LibVersions is a map of tracer libraries to inject with their versions. The key is the language and the value is + // the version of the library to inject. If empty, the auto instrumentation will inject all libraries. Full config + // key: apm_config.instrumentation.lib_versions + LibVersions map[string]string `mapstructure:"lib_versions"` + // Version is the version of the autoinstrumentation logic to use. We don't expose this option to the user, and V1 + // is deprecated and slated for removal. Full config key: apm_config.instrumentation.version + Version string `mapstructure:"version"` + // InjectorImageTag is the tag of the image to use for the auto instrumentation injector library. Full config key: + // apm_config.instrumentation.injector_image_tag + InjectorImageTag string `mapstructure:"injector_image_tag"` +} + +// NewInstrumentationConfig creates a new InstrumentationConfig from the datadog config. It returns an error if the +// configuration is invalid. +func NewInstrumentationConfig(datadogConfig config.Component) (*InstrumentationConfig, error) { + cfg := &InstrumentationConfig{} + err := datadogConfig.UnmarshalKey("apm_config.instrumentation", cfg) + if err != nil { + return nil, fmt.Errorf("unable to parse apm_config.instrumentation: %w", err) + } + + // Ensure both enabled and disabled namespaces are not set together. 
+ if len(cfg.EnabledNamespaces) > 0 && len(cfg.DisabledNamespaces) > 0 { + return nil, fmt.Errorf("apm.instrumentation.enabled_namespaces and apm.instrumentation.disabled_namespaces are mutually exclusive and cannot be set together") + } + + return cfg, nil +} + var ( minimumCPULimit resource.Quantity = resource.MustParse("0.05") // 0.05 core, otherwise copying + library initialization is going to take forever minimumMemoryLimit resource.Quantity = resource.MustParse("100Mi") // 100 MB (recommended minimum by Alpine) @@ -56,7 +100,6 @@ type initResourceRequirementConfiguration map[corev1.ResourceName]resource.Quant // retrieveConfig retrieves the configuration for the autoinstrumentation webhook from the datadog config func retrieveConfig(datadogConfig config.Component, injectionFilter mutatecommon.InjectionFilter) (webhookConfig, error) { - webhookConfig := webhookConfig{ isEnabled: datadogConfig.GetBool("admission_controller.auto_instrumentation.enabled"), endpoint: datadogConfig.GetString("admission_controller.auto_instrumentation.endpoint"), @@ -71,13 +114,18 @@ func retrieveConfig(datadogConfig config.Component, injectionFilter mutatecommon profilingEnabled: getOptionalStringValue(datadogConfig, "admission_controller.auto_instrumentation.profiling.enabled"), containerRegistry: mutatecommon.ContainerRegistry(datadogConfig, "admission_controller.auto_instrumentation.container_registry"), - injectorImageTag: datadogConfig.GetString("apm_config.instrumentation.injector_image_tag"), injectionFilter: injectionFilter, } - webhookConfig.pinnedLibraries = getPinnedLibraries(datadogConfig, webhookConfig.containerRegistry) - var err error - if webhookConfig.version, err = instrumentationVersion(datadogConfig.GetString("apm_config.instrumentation.version")); err != nil { + instCfg, err := NewInstrumentationConfig(datadogConfig) + if err != nil { + return webhookConfig, err + } + + webhookConfig.pinnedLibraries = getPinnedLibraries(instCfg.LibVersions, 
webhookConfig.containerRegistry) + webhookConfig.injectorImageTag = instCfg.InjectorImageTag + + if webhookConfig.version, err = instrumentationVersion(instCfg.Version); err != nil { return webhookConfig, fmt.Errorf("invalid version for key apm_config.instrumentation.version: %w", err) } @@ -118,13 +166,9 @@ func getOptionalStringValue(datadogConfig config.Component, key string) *string // getPinnedLibraries returns tracing libraries to inject as configured by apm_config.instrumentation.lib_versions // given a registry. -func getPinnedLibraries(datadogConfig config.Component, registry string) []libInfo { - // If APM Instrumentation is enabled and configuration apm_config.instrumentation.lib_versions specified, - // inject only the libraries from the configuration - singleStepLibraryVersions := datadogConfig.GetStringMapString("apm_config.instrumentation.lib_versions") - +func getPinnedLibraries(libVersions map[string]string, registry string) []libInfo { var res []libInfo - for lang, version := range singleStepLibraryVersions { + for lang, version := range libVersions { l := language(lang) if !l.isSupported() { log.Warnf("APM Instrumentation detected configuration for unsupported language: %s. Tracing library for %s will not be injected", lang, lang) diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/config_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/config_test.go new file mode 100644 index 00000000000000..3eb9daa1ea40b0 --- /dev/null +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/config_test.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver + +package autoinstrumentation + +import ( + "testing" + + "github.com/stretchr/testify/require" + + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" +) + +func TestNewInstrumentationConfig(t *testing.T) { + tests := []struct { + name string + configPath string + expected *InstrumentationConfig + shouldErr bool + }{ + { + name: "valid config enabled namespaces", + configPath: "testdata/enabled_namespaces.yaml", + shouldErr: false, + expected: &InstrumentationConfig{ + Enabled: true, + EnabledNamespaces: []string{"default"}, + DisabledNamespaces: []string{}, + LibVersions: map[string]string{ + "python": "default", + }, + Version: "v2", + InjectorImageTag: "foo", + }, + }, + { + name: "valid config disabled namespaces", + configPath: "testdata/disabled_namespaces.yaml", + shouldErr: false, + expected: &InstrumentationConfig{ + Enabled: true, + EnabledNamespaces: []string{}, + DisabledNamespaces: []string{"default"}, + LibVersions: map[string]string{ + "python": "default", + }, + Version: "v2", + InjectorImageTag: "foo", + }, + }, + { + name: "both enabled and disabled namespaces", + configPath: "testdata/both_enabled_and_disabled.yaml", + shouldErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockConfig := configmock.NewFromFile(t, tt.configPath) + actual, err := NewInstrumentationConfig(mockConfig) + if tt.shouldErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/both_enabled_and_disabled.yaml b/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/both_enabled_and_disabled.yaml new file mode 100644 index 00000000000000..710efa04366576 --- /dev/null +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/both_enabled_and_disabled.yaml @@ -0,0 +1,8 @@ +--- +apm_config: + instrumentation: + enabled_namespaces: + - "foo" + 
disabled_namespaces: + - "bar" + diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/disabled_namespaces.yaml b/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/disabled_namespaces.yaml new file mode 100644 index 00000000000000..4fe4f7acff0311 --- /dev/null +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/disabled_namespaces.yaml @@ -0,0 +1,11 @@ +--- +apm_config: + instrumentation: + enabled: true + disabled_namespaces: + - "default" + lib_versions: + python: "default" + version: "v2" + injector_image_tag: "foo" + diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/enabled_namespaces.yaml b/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/enabled_namespaces.yaml new file mode 100644 index 00000000000000..3813b2ed4de1ac --- /dev/null +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/testdata/enabled_namespaces.yaml @@ -0,0 +1,11 @@ +--- +apm_config: + instrumentation: + enabled: true + enabled_namespaces: + - "default" + lib_versions: + python: "default" + version: "v2" + injector_image_tag: "foo" + From add7d95f5fb541a2544b9658959f2cd2fbdc9aee Mon Sep 17 00:00:00 2001 From: Maxime Riaud <65339037+misteriaud@users.noreply.github.com> Date: Wed, 29 Jan 2025 20:34:28 +0100 Subject: [PATCH 58/97] Reapply "[ASCII-2587] Migrating TraceAgent to use IPC cert" (#33402) --- cmd/agent/subcommands/flare/command.go | 22 +++++--- cmd/agent/subcommands/flare/command_test.go | 18 +++++-- cmd/agent/subcommands/secret/command.go | 2 +- comp/trace/agent/impl/agent.go | 4 ++ comp/trace/agent/impl/run.go | 3 ++ comp/trace/bundle_test.go | 4 ++ comp/trace/status/statusimpl/status.go | 2 +- pkg/config/fetcher/from_processes.go | 2 +- pkg/flare/archive.go | 2 +- pkg/trace/api/api_test.go | 52 +++++++++++++++++-- pkg/trace/api/debug_server.go | 35 +++++++++---- pkg/trace/info/info.go | 6 ++- pkg/trace/info/info_test.go | 8 +-- .../config-refresh/config_endpoint.go | 2 +- 14 files 
changed, 128 insertions(+), 34 deletions(-) diff --git a/cmd/agent/subcommands/flare/command.go b/cmd/agent/subcommands/flare/command.go index e3a20c4789284b..75fa34136feacb 100644 --- a/cmd/agent/subcommands/flare/command.go +++ b/cmd/agent/subcommands/flare/command.go @@ -177,10 +177,18 @@ func readProfileData(seconds int) (flare.ProfileData, error) { type pprofGetter func(path string) ([]byte, error) - tcpGet := func(portConfig string) pprofGetter { - pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfigsetup.Datadog().GetInt(portConfig)) + tcpGet := func(portConfig string, onHTTPS bool) pprofGetter { + endpoint := url.URL{ + Scheme: "http", + Host: net.JoinHostPort("127.0.0.1", strconv.Itoa(pkgconfigsetup.Datadog().GetInt(portConfig))), + Path: "/debug/pprof", + } + if onHTTPS { + endpoint.Scheme = "https" + } + return func(path string) ([]byte, error) { - return util.DoGet(c, pprofURL+path, util.LeaveConnectionOpen) + return util.DoGet(c, endpoint.String()+path, util.LeaveConnectionOpen) } } @@ -230,15 +238,15 @@ func readProfileData(seconds int) (flare.ProfileData, error) { } agentCollectors := map[string]agentProfileCollector{ - "core": serviceProfileCollector(tcpGet("expvar_port"), seconds), - "security-agent": serviceProfileCollector(tcpGet("security_agent.expvar_port"), seconds), + "core": serviceProfileCollector(tcpGet("expvar_port", false), seconds), + "security-agent": serviceProfileCollector(tcpGet("security_agent.expvar_port", false), seconds), } if pkgconfigsetup.Datadog().GetBool("process_config.enabled") || pkgconfigsetup.Datadog().GetBool("process_config.container_collection.enabled") || pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled") { - agentCollectors["process"] = serviceProfileCollector(tcpGet("process_config.expvar_port"), seconds) + agentCollectors["process"] = serviceProfileCollector(tcpGet("process_config.expvar_port", false), seconds) } if pkgconfigsetup.Datadog().GetBool("apm_config.enabled") 
{ @@ -251,7 +259,7 @@ func readProfileData(seconds int) (flare.ProfileData, error) { traceCpusec = 4 } - agentCollectors["trace"] = serviceProfileCollector(tcpGet("apm_config.debug.port"), traceCpusec) + agentCollectors["trace"] = serviceProfileCollector(tcpGet("apm_config.debug.port", true), traceCpusec) } if pkgconfigsetup.SystemProbe().GetBool("system_probe_config.enabled") { diff --git a/cmd/agent/subcommands/flare/command_test.go b/cmd/agent/subcommands/flare/command_test.go index a27304e95b1338..4c96cae079aa96 100644 --- a/cmd/agent/subcommands/flare/command_test.go +++ b/cmd/agent/subcommands/flare/command_test.go @@ -29,6 +29,7 @@ type commandTestSuite struct { suite.Suite sysprobeSocketPath string tcpServer *httptest.Server + tcpTLSServer *httptest.Server unixServer *httptest.Server systemProbeServer *httptest.Server } @@ -42,13 +43,17 @@ func (c *commandTestSuite) SetupSuite() { // This should be called by each test that requires them. func (c *commandTestSuite) startTestServers() { t := c.T() - c.tcpServer, c.unixServer, c.systemProbeServer = c.getPprofTestServer() + c.tcpServer, c.tcpTLSServer, c.unixServer, c.systemProbeServer = c.getPprofTestServer() t.Cleanup(func() { if c.tcpServer != nil { c.tcpServer.Close() c.tcpServer = nil } + if c.tcpTLSServer != nil { + c.tcpTLSServer.Close() + c.tcpTLSServer = nil + } if c.unixServer != nil { c.unixServer.Close() c.unixServer = nil @@ -82,12 +87,13 @@ func newMockHandler() http.HandlerFunc { }) } -func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, unixServer *httptest.Server, sysProbeServer *httptest.Server) { +func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, tcpTLSServer *httptest.Server, unixServer *httptest.Server, sysProbeServer *httptest.Server) { var err error t := c.T() handler := newMockHandler() tcpServer = httptest.NewServer(handler) + tcpTLSServer = httptest.NewTLSServer(handler) if runtime.GOOS == "linux" { unixServer = 
httptest.NewUnstartedServer(handler) unixServer.Listener, err = net.Listen("unix", c.sysprobeSocketPath) @@ -101,7 +107,7 @@ func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, uni sysProbeServer.Start() } - return tcpServer, unixServer, sysProbeServer + return tcpServer, tcpTLSServer, unixServer, sysProbeServer } func TestCommandTestSuite(t *testing.T) { @@ -116,10 +122,14 @@ func (c *commandTestSuite) TestReadProfileData() { require.NoError(t, err) port := u.Port() + u, err = url.Parse(c.tcpTLSServer.URL) + require.NoError(t, err) + httpsPort := u.Port() + mockConfig := configmock.New(t) mockConfig.SetWithoutSource("expvar_port", port) mockConfig.SetWithoutSource("apm_config.enabled", true) - mockConfig.SetWithoutSource("apm_config.debug.port", port) + mockConfig.SetWithoutSource("apm_config.debug.port", httpsPort) mockConfig.SetWithoutSource("apm_config.receiver_timeout", "10") mockConfig.SetWithoutSource("process_config.expvar_port", port) mockConfig.SetWithoutSource("security_agent.expvar_port", port) diff --git a/cmd/agent/subcommands/secret/command.go b/cmd/agent/subcommands/secret/command.go index b57fa2a6a9ddbe..09f814957ca027 100644 --- a/cmd/agent/subcommands/secret/command.go +++ b/cmd/agent/subcommands/secret/command.go @@ -114,7 +114,7 @@ func commonSubAgentSecretRefresh(conf config.Component, agentName, portConfigNam c := apiutil.GetClient(false) c.Timeout = conf.GetDuration("server_timeout") * time.Second - url := fmt.Sprintf("http://127.0.0.1:%d/secret/refresh", port) + url := fmt.Sprintf("https://127.0.0.1:%d/secret/refresh", port) res, err := apiutil.DoGet(c, url, apiutil.CloseConnection) if err != nil { return nil, fmt.Errorf("could not contact %s: %s", agentName, err) diff --git a/comp/trace/agent/impl/agent.go b/comp/trace/agent/impl/agent.go index f05d2d7b3c310d..5228d6084d56ea 100644 --- a/comp/trace/agent/impl/agent.go +++ b/comp/trace/agent/impl/agent.go @@ -24,6 +24,7 @@ import ( 
"go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/fx" + "github.com/DataDog/datadog-agent/comp/api/authtoken" "github.com/DataDog/datadog-agent/comp/core/secrets" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" @@ -68,6 +69,7 @@ type dependencies struct { Statsd statsd.Component Tagger tagger.Component Compressor compression.Component + At authtoken.Component } var _ traceagent.Component = (*component)(nil) @@ -93,6 +95,7 @@ type component struct { params *Params tagger tagger.Component telemetryCollector telemetry.TelemetryCollector + at authtoken.Component wg *sync.WaitGroup } @@ -115,6 +118,7 @@ func NewAgent(deps dependencies) (traceagent.Component, error) { params: deps.Params, telemetryCollector: deps.TelemetryCollector, tagger: deps.Tagger, + at: deps.At, wg: &sync.WaitGroup{}, } statsdCl, err := setupMetrics(deps.Statsd, c.config, c.telemetryCollector) diff --git a/comp/trace/agent/impl/run.go b/comp/trace/agent/impl/run.go index 20ecbc6496a1b3..c4163d5b41680f 100644 --- a/comp/trace/agent/impl/run.go +++ b/comp/trace/agent/impl/run.go @@ -33,6 +33,9 @@ import ( // runAgentSidekicks is the entrypoint for running non-components that run along the agent. 
func runAgentSidekicks(ag component) error { + // Configure the Trace Agent Debug server to use the IPC certificate + ag.Agent.DebugServer.SetTLSConfig(ag.at.GetTLSServerConfig()) + tracecfg := ag.config.Object() err := info.InitInfo(tracecfg) // for expvar & -info option if err != nil { diff --git a/comp/trace/bundle_test.go b/comp/trace/bundle_test.go index e9874a4b40077c..692e7c255f4731 100644 --- a/comp/trace/bundle_test.go +++ b/comp/trace/bundle_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/fx" + "github.com/DataDog/datadog-agent/comp/api/authtoken/createandfetchimpl" + "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" "github.com/DataDog/datadog-agent/comp/core" coreconfig "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" @@ -45,6 +47,7 @@ func TestBundleDependencies(t *testing.T) { zstdfx.Module(), taggerfx.Module(tagger.Params{}), fx.Supply(&traceagentimpl.Params{}), + createandfetchimpl.Module(), ) } @@ -75,6 +78,7 @@ func TestMockBundleDependencies(t *testing.T) { fx.Invoke(func(_ traceagent.Component) {}), MockBundle(), taggerfx.Module(tagger.Params{}), + fetchonlyimpl.MockModule(), )) require.NotNil(t, cfg.Object()) diff --git a/comp/trace/status/statusimpl/status.go b/comp/trace/status/statusimpl/status.go index e476ee0281d7a4..00a8730b87da8d 100644 --- a/comp/trace/status/statusimpl/status.go +++ b/comp/trace/status/statusimpl/status.go @@ -95,7 +95,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { port := s.Config.GetInt("apm_config.debug.port") c := client() - url := fmt.Sprintf("http://localhost:%d/debug/vars", port) + url := fmt.Sprintf("https://localhost:%d/debug/vars", port) resp, err := apiutil.DoGet(c, url, apiutil.CloseConnection) if err != nil { return map[string]interface{}{ diff --git a/pkg/config/fetcher/from_processes.go b/pkg/config/fetcher/from_processes.go index ee3f1e0062ce52..e6f86f6c44e191 
100644 --- a/pkg/config/fetcher/from_processes.go +++ b/pkg/config/fetcher/from_processes.go @@ -71,7 +71,7 @@ func TraceAgentConfig(config config.Reader) (string, error) { c := util.GetClient(false) c.Timeout = config.GetDuration("server_timeout") * time.Second - ipcAddressWithPort := fmt.Sprintf("http://127.0.0.1:%d/config", port) + ipcAddressWithPort := fmt.Sprintf("https://127.0.0.1:%d/config", port) client := settingshttp.NewClient(c, ipcAddressWithPort, "trace-agent", settingshttp.NewHTTPClientOptions(util.CloseConnection)) return client.FullConfig() diff --git a/pkg/flare/archive.go b/pkg/flare/archive.go index 54f94503268418..5581afaded686f 100644 --- a/pkg/flare/archive.go +++ b/pkg/flare/archive.go @@ -216,7 +216,7 @@ func GetExpVar(fb flaretypes.FlareBuilder) error { apmDebugPort := pkgconfigsetup.Datadog().GetInt("apm_config.debug.port") f := filepath.Join("expvar", "trace-agent") - resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/debug/vars", apmDebugPort)) + resp, err := http.Get(fmt.Sprintf("https://127.0.0.1:%d/debug/vars", apmDebugPort)) if err != nil { return fb.AddFile(f, []byte(fmt.Sprintf("Error retrieving vars: %v", err))) } diff --git a/pkg/trace/api/api_test.go b/pkg/trace/api/api_test.go index e9632f7c573c42..bc72f65053f5b1 100644 --- a/pkg/trace/api/api_test.go +++ b/pkg/trace/api/api_test.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "io" + "net" "net/http" "net/http/httptest" "os" @@ -1044,14 +1045,26 @@ func TestExpvar(t *testing.T) { } c := newTestReceiverConfig() - c.DebugServerPort = 5012 + c.DebugServerPort = 6789 info.InitInfo(c) + + // Starting a TLS httptest server to retrieve tlsCert + ts := httptest.NewTLSServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})) + tlsConfig := ts.TLS.Clone() + // Setting a client with the proper TLS configuration + client := ts.Client() + ts.Close() + + // Starting Debug Server s := NewDebugServer(c) + s.SetTLSConfig(tlsConfig) + + // Starting the Debug server 
s.Start() defer s.Stop() - resp, err := http.Get("http://127.0.0.1:5012/debug/vars") - assert.NoError(t, err) + resp, err := client.Get(fmt.Sprintf("https://127.0.0.1:%d/debug/vars", c.DebugServerPort)) + require.NoError(t, err) defer resp.Body.Close() t.Run("read-expvars", func(t *testing.T) { @@ -1067,6 +1080,39 @@ func TestExpvar(t *testing.T) { }) } +func TestWithoutIPCCert(t *testing.T) { + c := newTestReceiverConfig() + + // Getting an available port + a, err := net.ResolveTCPAddr("tcp", "localhost:0") + require.NoError(t, err) + + var l *net.TCPListener + l, err = net.ListenTCP("tcp", a) + require.NoError(t, err) + + availablePort := l.Addr().(*net.TCPAddr).Port + require.NoError(t, l.Close()) + require.NotZero(t, availablePort) + + c.DebugServerPort = availablePort + info.InitInfo(c) + + // Starting Debug Server + s := NewDebugServer(c) + + // Starting the Debug server + s.Start() + defer s.Stop() + + // Server should not be able to connect because it didn't start + conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(c.DebugServerPort)), time.Second) + require.Error(t, err) + if conn != nil { + conn.Close() + } +} + func TestNormalizeHTTPHeader(t *testing.T) { tests := []struct { input string diff --git a/pkg/trace/api/debug_server.go b/pkg/trace/api/debug_server.go index 828d5357330ebd..812161628e9241 100644 --- a/pkg/trace/api/debug_server.go +++ b/pkg/trace/api/debug_server.go @@ -9,6 +9,7 @@ package api import ( "context" + "crypto/tls" "expvar" "fmt" "net" @@ -29,9 +30,10 @@ const ( // DebugServer serves /debug/* endpoints type DebugServer struct { - conf *config.AgentConfig - server *http.Server - mux *http.ServeMux + conf *config.AgentConfig + server *http.Server + mux *http.ServeMux + tlsConfig *tls.Config } // NewDebugServer returns a debug server @@ -48,18 +50,28 @@ func (ds *DebugServer) Start() { log.Debug("Debug server is disabled by config (apm_config.debug.port: 0).") return } - ds.server = &http.Server{ - 
ReadTimeout: defaultTimeout, - WriteTimeout: defaultTimeout, - Handler: ds.setupMux(), + + // TODO: Improve certificate delivery + if ds.tlsConfig == nil { + log.Warnf("Debug server wasn't able to start: uninitialized IPC certificate") + return } - listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ds.conf.DebugServerPort)) + + listener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(ds.conf.DebugServerPort))) if err != nil { log.Errorf("Error creating debug server listener: %s", err) return } + + ds.server = &http.Server{ + ReadTimeout: defaultTimeout, + WriteTimeout: defaultTimeout, + Handler: ds.setupMux(), + } + + tlsListener := tls.NewListener(listener, ds.tlsConfig) go func() { - if err := ds.server.Serve(listener); err != nil && err != http.ErrServerClosed { + if err := ds.server.Serve(tlsListener); err != nil && err != http.ErrServerClosed { log.Errorf("Could not start debug server: %s. Debug server disabled.", err) } }() @@ -82,6 +94,11 @@ func (ds *DebugServer) AddRoute(route string, handler http.Handler) { ds.mux.Handle(route, handler) } +// SetTLSConfig adds the provided tls.Config to the internal http.Server +func (ds *DebugServer) SetTLSConfig(config *tls.Config) { + ds.tlsConfig = config +} + func (ds *DebugServer) setupMux() *http.ServeMux { ds.mux.HandleFunc("/debug/pprof/", pprof.Index) ds.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) diff --git a/pkg/trace/info/info.go b/pkg/trace/info/info.go index 0012b285416a57..ab85e2605cad0c 100644 --- a/pkg/trace/info/info.go +++ b/pkg/trace/info/info.go @@ -8,6 +8,7 @@ package info import ( "bytes" + "crypto/tls" "encoding/json" "expvar" // automatically publish `/debug/vars` on HTTP port "fmt" @@ -237,8 +238,9 @@ func getProgramBanner(version string) (string, string) { // If error is nil, means the program is running. 
// If not, it displays a pretty-printed message anyway (for support) func Info(w io.Writer, conf *config.AgentConfig) error { - url := fmt.Sprintf("http://127.0.0.1:%d/debug/vars", conf.DebugServerPort) - client := http.Client{Timeout: 3 * time.Second} + url := fmt.Sprintf("https://127.0.0.1:%d/debug/vars", conf.DebugServerPort) + tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} + client := http.Client{Timeout: 3 * time.Second, Transport: tr} resp, err := client.Get(url) if err != nil { // OK, here, we can't even make an http call on the agent port, diff --git a/pkg/trace/info/info_test.go b/pkg/trace/info/info_test.go index 596d536601e4c5..b15a604c640aa5 100644 --- a/pkg/trace/info/info_test.go +++ b/pkg/trace/info/info_test.go @@ -63,7 +63,7 @@ func (h *testServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func testServer(t *testing.T, testFile string) *httptest.Server { t.Helper() - server := httptest.NewServer(&testServerHandler{t: t, testFile: testFile}) + server := httptest.NewTLSServer(&testServerHandler{t: t, testFile: testFile}) t.Logf("test server (serving fake yet valid data) listening on %s", server.URL) return server } @@ -94,7 +94,7 @@ func (h *testServerWarningHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ } func testServerWarning(t *testing.T) *httptest.Server { - server := httptest.NewServer(&testServerWarningHandler{t: t}) + server := httptest.NewTLSServer(&testServerWarningHandler{t: t}) t.Logf("test server (serving data containing worrying values) listening on %s", server.URL) return server } @@ -119,7 +119,7 @@ func (h *testServerErrorHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques } func testServerError(t *testing.T) *httptest.Server { - server := httptest.NewServer(&testServerErrorHandler{t: t}) + server := httptest.NewTLSServer(&testServerErrorHandler{t: t}) t.Logf("test server (serving bad data to trigger errors) listening on %s", server.URL) return server } @@ -331,7 +331,7 @@ 
func TestError(t *testing.T) { assert.Equal(len(lines[1]), len(lines[2])) assert.Equal("", lines[3]) assert.Regexp(regexp.MustCompile(`^ Error: .*$`), lines[4]) - assert.Equal(fmt.Sprintf(" URL: http://127.0.0.1:%d/debug/vars", port), lines[5]) + assert.Equal(fmt.Sprintf(" URL: https://127.0.0.1:%d/debug/vars", port), lines[5]) assert.Equal("", lines[6]) assert.Equal("", lines[7]) } diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/config_endpoint.go b/test/new-e2e/tests/agent-shared-components/config-refresh/config_endpoint.go index d17d8a81bd48c1..19bfbc503ae18a 100644 --- a/test/new-e2e/tests/agent-shared-components/config-refresh/config_endpoint.go +++ b/test/new-e2e/tests/agent-shared-components/config-refresh/config_endpoint.go @@ -20,7 +20,7 @@ type agentConfigEndpointInfo struct { } func traceConfigEndpoint(port int) agentConfigEndpointInfo { - return agentConfigEndpointInfo{"trace-agent", "http", port, "/config"} + return agentConfigEndpointInfo{"trace-agent", "https", port, "/config"} } func processConfigEndpoint(port int) agentConfigEndpointInfo { From cb18ab669a2f6dc03fb23d12b271fa145dc22c9b Mon Sep 17 00:00:00 2001 From: morgan-wang <96082814+mwdd146980@users.noreply.github.com> Date: Wed, 29 Jan 2025 14:36:35 -0500 Subject: [PATCH 59/97] [ASCII-1994] downgrade ec2 tag warn log to debug (#33505) --- pkg/util/ec2/ec2_tags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/ec2/ec2_tags.go b/pkg/util/ec2/ec2_tags.go index 99ef4c12613e7f..ab8c682b0287c7 100644 --- a/pkg/util/ec2/ec2_tags.go +++ b/pkg/util/ec2/ec2_tags.go @@ -101,7 +101,7 @@ func fetchEc2TagsFromAPI(ctx context.Context) ([]string, error) { if err == nil { return tags, nil } - log.Warnf("unable to get tags using default credentials (falling back to instance role): %s", err) + log.Debugf("unable to get tags using default credentials (falling back to instance role): %s", err) // If the above fails, for backward compatibility, fall back to our 
legacy // behavior, where we explicitly query instance role to get credentials. From 88a6d21d40f8a8e858dc91709435462ef0c9cef8 Mon Sep 17 00:00:00 2001 From: Maxime Riaud <65339037+misteriaud@users.noreply.github.com> Date: Wed, 29 Jan 2025 20:47:56 +0100 Subject: [PATCH 60/97] [ASCII-2641] adding e2e test to check that IPC cert is used by the Agent IPC servers (#32570) --- pkg/api/security/cert/cert_getter.go | 2 +- .../tests/agent-shared-components/ipc/docs.go | 7 ++ .../ipc/fixtures/config.yaml.tmpl | 19 ++++ .../ipc/fixtures/security-agent.yaml | 2 + .../ipc/ipc_security_common.go | 91 +++++++++++++++++++ .../ipc/ipc_security_nix_test.go | 85 +++++++++++++++++ .../ipc/ipc_security_win_test.go | 89 ++++++++++++++++++ 7 files changed, 294 insertions(+), 1 deletion(-) create mode 100644 test/new-e2e/tests/agent-shared-components/ipc/docs.go create mode 100644 test/new-e2e/tests/agent-shared-components/ipc/fixtures/config.yaml.tmpl create mode 100644 test/new-e2e/tests/agent-shared-components/ipc/fixtures/security-agent.yaml create mode 100644 test/new-e2e/tests/agent-shared-components/ipc/ipc_security_common.go create mode 100644 test/new-e2e/tests/agent-shared-components/ipc/ipc_security_nix_test.go create mode 100644 test/new-e2e/tests/agent-shared-components/ipc/ipc_security_win_test.go diff --git a/pkg/api/security/cert/cert_getter.go b/pkg/api/security/cert/cert_getter.go index 09edb10e1cf5e2..d682e18da6820f 100644 --- a/pkg/api/security/cert/cert_getter.go +++ b/pkg/api/security/cert/cert_getter.go @@ -81,7 +81,7 @@ func fetchAgentIPCCert(config configModel.Reader, certCreationAllowed bool) ([]b return nil, nil, fmt.Errorf("unable to read authentication IPC cert/key files: %s", e.Error()) } - // Demultiplexing cert and key from file + // Reading and decoding cert and key from file var block *pem.Block block, rest := pem.Decode(certAndKey) diff --git a/test/new-e2e/tests/agent-shared-components/ipc/docs.go 
b/test/new-e2e/tests/agent-shared-components/ipc/docs.go new file mode 100644 index 00000000000000..1043030c52dacf --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/ipc/docs.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package ipc contains e2e tests for check security aroung IPC communications. +package ipc diff --git a/test/new-e2e/tests/agent-shared-components/ipc/fixtures/config.yaml.tmpl b/test/new-e2e/tests/agent-shared-components/ipc/fixtures/config.yaml.tmpl new file mode 100644 index 00000000000000..315fb62b4056d6 --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/ipc/fixtures/config.yaml.tmpl @@ -0,0 +1,19 @@ +log_level: debug +ipc_cert_file_path: {{.IPCCertFilePath}} + +cmd_port: {{.AgentCMDPort}} +agent_ipc: + port: {{.AgentIpcPort}} + +apm_config: + enabled: true + debug: + port: {{.ApmCmdPort}} + +security_agent: + cmd_port: {{.SecurityCmdPort}} + +process_config: + process_collection: + enabled: true + cmd_port: {{.ProcessCmdPort}} diff --git a/test/new-e2e/tests/agent-shared-components/ipc/fixtures/security-agent.yaml b/test/new-e2e/tests/agent-shared-components/ipc/fixtures/security-agent.yaml new file mode 100644 index 00000000000000..49517e76a319a5 --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/ipc/fixtures/security-agent.yaml @@ -0,0 +1,2 @@ +runtime_security_config: + enabled: true diff --git a/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_common.go b/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_common.go new file mode 100644 index 00000000000000..29edba71baed72 --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_common.go @@ -0,0 +1,91 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// 
under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package ipc + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + _ "embed" + "fmt" + "html/template" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" +) + +const ( + coreCMDPort = 5001 + coreIPCPort = 5004 + securityCmdPort = 5010 + apmCmdPort = 5012 + apmReceiverPort = 8126 + processCmdPort = 6162 + configRefreshIntervalSec = 10 +) + +//go:embed fixtures/config.yaml.tmpl +var coreConfigTmpl string + +//go:embed fixtures/security-agent.yaml +var securityAgentConfig string + +type endpoint struct { + name string + port int +} + +// assertAgentUseCert checks that all agents IPC server use the IPC certificate. +func assertAgentUseCert(t *assert.CollectT, host *components.RemoteHost, certPool *x509.CertPool) { + client := host.NewHTTPClient() + + tr := client.Transport.(*http.Transport).Clone() + // Reinitializing tlsConfig and replace transport + tr.TLSClientConfig = &tls.Config{} + client.Transport = tr + + //Assert that it's not working if the IPC cert is not set as RootCA + _, err := client.Get(fmt.Sprintf("https://127.0.0.1:%d", coreCMDPort)) // nolint: bodyclose + require.Error(t, err) + + // Setting IPC certificate as Root CA + tr.TLSClientConfig.RootCAs = certPool + + for _, endpoint := range []endpoint{ + {"coreCMD", coreCMDPort}, + {"coreIPC", coreIPCPort}, + {"securityAgent", securityCmdPort}, + {"traceAgentDebug", apmCmdPort}, + {"processAgent", processCmdPort}, + } { + // Make a request to the server + resp, err := client.Get(fmt.Sprintf("https://127.0.0.1:%d", endpoint.port)) + require.NoErrorf(t, err, "unable to connect to %v", endpoint.name) + defer resp.Body.Close() + + require.NotNilf(t, resp.TLS, "connection to %v didn't used TLS", endpoint.name) + 
require.Lenf(t, resp.TLS.PeerCertificates, 1, "server of %v server multiple certficiate", endpoint.name) + } +} + +// fillTmplConfig fills the template with the given variables and returns the result. +func fillTmplConfig(t *testing.T, tmplContent string, templateVars any) string { + t.Helper() + + var buffer bytes.Buffer + + tmpl, err := template.New("").Parse(tmplContent) + require.NoError(t, err) + + err = tmpl.Execute(&buffer, templateVars) + require.NoError(t, err) + + return buffer.String() +} diff --git a/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_nix_test.go b/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_nix_test.go new file mode 100644 index 00000000000000..b2b09a535c63e9 --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_nix_test.go @@ -0,0 +1,85 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package ipc + +import ( + "crypto/x509" + "encoding/pem" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" +) + +type ipcSecurityLinuxSuite struct { + e2e.BaseSuite[environments.Host] +} + +func TestIPCSecuirityLinuxSuite(t *testing.T) { + t.Parallel() + e2e.Run(t, &ipcSecurityLinuxSuite{}, e2e.WithProvisioner(awshost.Provisioner())) +} + +func (v *ipcSecurityLinuxSuite) TestServersideIPCCertUsage() { + rootDir := "/tmp/" + v.T().Name() + v.Env().RemoteHost.MkdirAll(rootDir) + + ipcCertFilePath := "/etc/datadog-agent/ipc_cert.pem" + + // fill the config template + templateVars := map[string]interface{}{ + "IPCCertFilePath": ipcCertFilePath, + "AgentCMDPort": coreCMDPort, + "ApmCmdPort": apmCmdPort, + "AgentIpcPort": coreIPCPort, + "ProcessCmdPort": processCmdPort, + "SecurityCmdPort": securityCmdPort, + } + coreconfig := fillTmplConfig(v.T(), coreConfigTmpl, templateVars) + + // start the agent with that configuration + v.UpdateEnv(awshost.Provisioner( + awshost.WithAgentOptions( + agentparams.WithAgentConfig(coreconfig), + agentparams.WithSecurityAgentConfig(securityAgentConfig), + ), + awshost.WithAgentClientOptions( + agentclientparams.WithTraceAgentOnPort(apmReceiverPort), + agentclientparams.WithProcessAgentOnPort(processCmdPort), + agentclientparams.WithSecurityAgentOnPort(securityCmdPort), + ), + )) + + // get auth token + v.T().Log("Getting the IPC cert") + ipcCertContent := v.Env().RemoteHost.MustExecute("sudo cat " + ipcCertFilePath) + + // Reading and decoding cert and key from file + var block *pem.Block + + block, 
_ = pem.Decode([]byte(strings.TrimSpace(ipcCertContent))) + require.NotNil(v.T(), block) + require.Equal(v.T(), block.Type, "CERTIFICATE") + cert := pem.EncodeToMemory(block) + + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(cert) + require.True(v.T(), ok) + + // check that the Agent API server use the IPC cert + require.EventuallyWithT(v.T(), func(t *assert.CollectT) { + assertAgentUseCert(t, v.Env().RemoteHost, certPool) + }, 2*configRefreshIntervalSec*time.Second, 1*time.Second) +} diff --git a/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_win_test.go b/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_win_test.go new file mode 100644 index 00000000000000..f2c09dd2f1366f --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_win_test.go @@ -0,0 +1,89 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+package ipc + +import ( + "crypto/x509" + "encoding/pem" + "testing" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" +) + +type ipcSecurityWindowsSuite struct { + e2e.BaseSuite[environments.Host] +} + +func TestIPCSecurityWindowsSuite(t *testing.T) { + t.Parallel() + e2e.Run(t, &ipcSecurityWindowsSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) +} + +func (v *ipcSecurityWindowsSuite) TestServersideIPCCertUsage() { + rootDir := "C:/tmp/" + v.T().Name() + v.Env().RemoteHost.MkdirAll(rootDir) + + ipcCertFilePath := `C:\ProgramData\Datadog\ipc_cert.pem` + + templateVars := map[string]interface{}{ + "IPCCertFilePath": ipcCertFilePath, + "AgentCMDPort": coreCMDPort, + "AgentIpcPort": coreIPCPort, + "ApmCmdPort": apmCmdPort, + "ProcessCmdPort": processCmdPort, + "SecurityCmdPort": securityCmdPort, + } + coreconfig := fillTmplConfig(v.T(), coreConfigTmpl, templateVars) + + agentOptions := []func(*agentparams.Params) error{ + agentparams.WithAgentConfig(coreconfig), + agentparams.WithSecurityAgentConfig(securityAgentConfig), + } + // start the agent with that configuration + v.UpdateEnv(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), + awshost.WithAgentOptions(agentOptions...), + awshost.WithAgentClientOptions( + agentclientparams.WithTraceAgentOnPort(apmReceiverPort), + agentclientparams.WithProcessAgentOnPort(processCmdPort), + ), + )) + 
+ // Currently the e2e framework does not restart the security agent on Windows so we need to do it manually. + // When the framework will support it, remove the line below and add `agentclientparams.WithSecurityAgentOnPort(securityCmdPort)` to the agent options. + v.Env().RemoteHost.MustExecute("Restart-Service datadog-security-agent") + + // get auth token + v.T().Log("Getting the IPC cert") + ipcCertContent, err := v.Env().RemoteHost.ReadFile(ipcCertFilePath) + require.NoError(v.T(), err) + + // Reading and decoding cert and key from file + var block *pem.Block + + block, _ = pem.Decode(ipcCertContent) + require.NotNil(v.T(), block) + require.Equal(v.T(), block.Type, "CERTIFICATE") + cert := pem.EncodeToMemory(block) + + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(cert) + require.True(v.T(), ok) + + // check that the Agent API server use the IPC cert + require.EventuallyWithT(v.T(), func(t *assert.CollectT) { + assertAgentUseCert(t, v.Env().RemoteHost, certPool) + }, 2*configRefreshIntervalSec*time.Second, 1*time.Second) +} From 77f378ed1369434f9514d32c048f47daf5ed8ec2 Mon Sep 17 00:00:00 2001 From: Dan Lepage <140522866+dplepage-dd@users.noreply.github.com> Date: Wed, 29 Jan 2025 15:52:21 -0500 Subject: [PATCH 61/97] [NDM] Profile Cloning (#32375) --- .../snmp/internal/profile/config_profile.go | 14 ++ .../snmp/internal/profile/profile_resolver.go | 6 +- .../snmp/internal/profile/testing_utils.go | 4 +- .../corechecks/snmp/internal/profile/utils.go | 10 +- .../snmp/internal/profile/utils_test.go | 9 +- .../profile/profiledefinition/clone.go | 38 ++++ .../profile/profiledefinition/clone_test.go | 101 ++++++++++ .../profile/profiledefinition/metadata.go | 22 +++ .../profiledefinition/metadata_test.go | 90 +++++++++ .../profile/profiledefinition/metrics.go | 44 +++++ .../profile/profiledefinition/metrics_test.go | 179 ++++++++++++++++++ .../profiledefinition/profile_definition.go | 23 +++ 12 files changed, 522 insertions(+), 18 deletions(-) 
create mode 100644 pkg/networkdevice/profile/profiledefinition/clone.go create mode 100644 pkg/networkdevice/profile/profiledefinition/clone_test.go create mode 100644 pkg/networkdevice/profile/profiledefinition/metadata_test.go create mode 100644 pkg/networkdevice/profile/profiledefinition/metrics_test.go diff --git a/pkg/collector/corechecks/snmp/internal/profile/config_profile.go b/pkg/collector/corechecks/snmp/internal/profile/config_profile.go index 7b1b70fd4be166..7edaa801160ecf 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/config_profile.go +++ b/pkg/collector/corechecks/snmp/internal/profile/config_profile.go @@ -75,6 +75,11 @@ func (pcm ProfileConfigMap) withNames() ProfileConfigMap { return pcm } +// Clone duplicates a ProfileConfigMap +func (pcm ProfileConfigMap) Clone() ProfileConfigMap { + return profiledefinition.CloneMap(pcm) +} + // ProfileConfig represents a profile configuration. type ProfileConfig struct { DefinitionFile string `yaml:"definition_file"` @@ -82,3 +87,12 @@ type ProfileConfig struct { IsUserProfile bool `yaml:"-"` } + +// Clone duplicates a ProfileConfig +func (p ProfileConfig) Clone() ProfileConfig { + return ProfileConfig{ + DefinitionFile: p.DefinitionFile, + Definition: *p.Definition.Clone(), + IsUserProfile: p.IsUserProfile, + } +} diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_resolver.go b/pkg/collector/corechecks/snmp/internal/profile/profile_resolver.go index c90d99e6586623..9b23e88fcd131f 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_resolver.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_resolver.go @@ -10,11 +10,9 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/mohae/deepcopy" - "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" "github.com/DataDog/datadog-agent/pkg/networkdevice/utils" + "github.com/DataDog/datadog-agent/pkg/util/log" 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/configvalidation" ) @@ -41,7 +39,7 @@ func loadResolveProfiles(pConfig ProfileConfigMap, defaultProfiles ProfileConfig continue } - newProfileConfig := deepcopy.Copy(pConfig[name]).(ProfileConfig) + newProfileConfig := pConfig[name].Clone() err := recursivelyExpandBaseProfiles(name, &newProfileConfig.Definition, newProfileConfig.Definition.Extends, []string{}, pConfig, defaultProfiles) if err != nil { log.Warnf("failed to expand profile %q: %v", name, err) diff --git a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go index 71154c3d29f656..562b65c6bcae71 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go +++ b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go @@ -11,8 +11,6 @@ import ( "path/filepath" "regexp" - "github.com/mohae/deepcopy" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -20,7 +18,7 @@ import ( // CopyProfileDefinition copies a profile, it's used for testing func CopyProfileDefinition(profileDef profiledefinition.ProfileDefinition) profiledefinition.ProfileDefinition { - return deepcopy.Copy(profileDef).(profiledefinition.ProfileDefinition) + return *profileDef.Clone() } // SetConfdPathAndCleanProfiles is used for testing only diff --git a/pkg/collector/corechecks/snmp/internal/profile/utils.go b/pkg/collector/corechecks/snmp/internal/profile/utils.go index 0ac5e1d8508d8f..3959305596e815 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/utils.go +++ b/pkg/collector/corechecks/snmp/internal/profile/utils.go @@ -7,19 +7,17 @@ package profile import ( "os" - - "github.com/mohae/deepcopy" ) -// mergeProfiles merges two profiles config map -// we use deepcopy to lower risk of modifying original profiles +// mergeProfiles merges two ProfileConfigMaps +// 
we clone the profiles to lower the risk of modifying original profiles func mergeProfiles(profilesA ProfileConfigMap, profilesB ProfileConfigMap) ProfileConfigMap { profiles := make(ProfileConfigMap) for k, v := range profilesA { - profiles[k] = deepcopy.Copy(v).(ProfileConfig) + profiles[k] = v.Clone() } for k, v := range profilesB { - profiles[k] = deepcopy.Copy(v).(ProfileConfig) + profiles[k] = v.Clone() } return profiles } diff --git a/pkg/collector/corechecks/snmp/internal/profile/utils_test.go b/pkg/collector/corechecks/snmp/internal/profile/utils_test.go index b097fc0dc48b08..9ecf6d4f61f123 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/utils_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/utils_test.go @@ -7,7 +7,6 @@ package profile import ( "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" - "github.com/mohae/deepcopy" "github.com/stretchr/testify/assert" "testing" ) @@ -34,7 +33,7 @@ func Test_mergeProfiles(t *testing.T) { }, }, } - profilesACopy := deepcopy.Copy(profilesA).(ProfileConfigMap) + profilesACopy := profilesA.Clone() profilesB := ProfileConfigMap{ "profile-p1": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ @@ -56,7 +55,7 @@ func Test_mergeProfiles(t *testing.T) { }, }, } - profilesBCopy := deepcopy.Copy(profilesB).(ProfileConfigMap) + profilesBCopy := profilesB.Clone() actualMergedProfiles := mergeProfiles(profilesA, profilesB) @@ -119,6 +118,6 @@ func Test_mergeProfiles(t *testing.T) { }, } assert.Equal(t, expectedMergedProfiles, actualMergedProfiles) - assert.Equal(t, profilesACopy, profilesA) - assert.Equal(t, profilesBCopy, profilesB) + assert.Equal(t, profilesA, profilesACopy) + assert.Equal(t, profilesB, profilesBCopy) } diff --git a/pkg/networkdevice/profile/profiledefinition/clone.go b/pkg/networkdevice/profile/profiledefinition/clone.go new file mode 100644 index 00000000000000..3986258d78fade --- /dev/null +++ 
b/pkg/networkdevice/profile/profiledefinition/clone.go @@ -0,0 +1,38 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package profiledefinition + +// Cloneable is a generic type for objects that can duplicate themselves. +// It is exclusively used in the form [T Cloneable[T]], i.e. a type that +// has a .Clone() that returns a new instance of itself. +type Cloneable[T any] interface { + Clone() T +} + +// CloneSlice clones all the objects in a slice into a new slice. +func CloneSlice[Slice ~[]T, T Cloneable[T]](s Slice) Slice { + if s == nil { + return nil + } + result := make(Slice, 0, len(s)) + for _, v := range s { + result = append(result, v.Clone()) + } + return result +} + +// CloneMap clones a map[K]T for any cloneable type T. +// The map keys are shallow-copied; values are cloned. +func CloneMap[Map ~map[K]T, K comparable, T Cloneable[T]](m Map) Map { + if m == nil { + return nil + } + result := make(Map, len(m)) + for k, v := range m { + result[k] = v.Clone() + } + return result +} diff --git a/pkg/networkdevice/profile/profiledefinition/clone_test.go b/pkg/networkdevice/profile/profiledefinition/clone_test.go new file mode 100644 index 00000000000000..6ffdb4251ce6f4 --- /dev/null +++ b/pkg/networkdevice/profile/profiledefinition/clone_test.go @@ -0,0 +1,101 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +package profiledefinition + +import ( + "github.com/stretchr/testify/assert" + "slices" + "testing" +) + +type cloneMe struct { + label *string + ps []int +} + +func item(label string, ps ...int) *cloneMe { + return &cloneMe{ + label: &label, + ps: ps, + } +} + +func (c *cloneMe) Clone() *cloneMe { + c2 := &cloneMe{ + ps: slices.Clone(c.ps), + } + if c.label != nil { + var tmp = *c.label + c2.label = &tmp + } + return c2 +} + +func TestCloneSlice(t *testing.T) { + items := []*cloneMe{ + item("a", 1, 2, 3, 4), + item("b", 1, 2), + } + itemsCopy := CloneSlice(items) + *itemsCopy[0].label = "aaa" + itemsCopy[1] = item("bbb", 10, 20) + itemsCopy = append(itemsCopy, item("ccc", 100, 200)) + // items is unchanged + assert.Equal(t, []*cloneMe{ + item("a", 1, 2, 3, 4), + item("b", 1, 2), + }, items) + assert.Equal(t, []*cloneMe{ + item("aaa", 1, 2, 3, 4), + item("bbb", 10, 20), + item("ccc", 100, 200), + }, itemsCopy) +} + +func TestCloneMap(t *testing.T) { + m := map[string]*cloneMe{ + "Item A": item("a", 1, 2, 3, 4), + "Item B": item("b", 1, 2), + } + mCopy := CloneMap(m) + mCopy["Item A"].ps[0] = 100 + mCopy["Item B"] = item("bbb", 10, 20) + mCopy["Item C"] = item("ccc", 100, 200) + assert.Equal(t, map[string]*cloneMe{ + "Item A": item("a", 1, 2, 3, 4), + "Item B": item("b", 1, 2), + }, m) + assert.Equal(t, map[string]*cloneMe{ + "Item A": item("a", 100, 2, 3, 4), + "Item B": item("bbb", 10, 20), + "Item C": item("ccc", 100, 200), + }, mCopy) +} + +func TestCustomSliceClone(t *testing.T) { + type customSlice []*cloneMe + + items := customSlice{ + item("a", 1, 2, 3, 4), + item("b", 1, 2), + } + itemsCopy := CloneSlice(items) + + assert.IsType(t, customSlice{}, itemsCopy) + assert.Equal(t, items, itemsCopy) +} + +func TestCustomMapClone(t *testing.T) { + type customMap map[string]*cloneMe + + m := customMap{ + "Item A": item("a", 1, 2, 3, 4), + "Item B": item("b", 1, 2), + } + mCopy := CloneMap(m) + assert.IsType(t, customMap{}, mCopy) + assert.Equal(t, m, mCopy) +} 
diff --git a/pkg/networkdevice/profile/profiledefinition/metadata.go b/pkg/networkdevice/profile/profiledefinition/metadata.go index 30212a0dd579f9..00175da123d8f3 100644 --- a/pkg/networkdevice/profile/profiledefinition/metadata.go +++ b/pkg/networkdevice/profile/profiledefinition/metadata.go @@ -28,12 +28,25 @@ func (mc *MetadataConfig) UnmarshalJSON(data []byte) error { return (*ListMap[MetadataResourceConfig])(mc).UnmarshalJSON(data) } +// Clone duplicates this MetadataConfig +func (mc MetadataConfig) Clone() MetadataConfig { + return CloneMap(mc) +} + // MetadataResourceConfig holds configs for a metadata resource type MetadataResourceConfig struct { Fields ListMap[MetadataField] `yaml:"fields" json:"fields"` IDTags MetricTagConfigList `yaml:"id_tags,omitempty" json:"id_tags,omitempty"` } +// Clone duplicates this MetadataResourceConfig +func (c MetadataResourceConfig) Clone() MetadataResourceConfig { + return MetadataResourceConfig{ + Fields: CloneMap(c.Fields), + IDTags: CloneSlice(c.IDTags), + } +} + // MetadataField holds configs for a metadata field type MetadataField struct { Symbol SymbolConfig `yaml:"symbol,omitempty" json:"symbol,omitempty"` @@ -41,6 +54,15 @@ type MetadataField struct { Value string `yaml:"value,omitempty" json:"value,omitempty"` } +// Clone duplicates this MetadataField +func (c MetadataField) Clone() MetadataField { + return MetadataField{ + Symbol: c.Symbol.Clone(), + Symbols: CloneSlice(c.Symbols), + Value: c.Value, + } +} + // NewMetadataResourceConfig returns a new metadata resource config func NewMetadataResourceConfig() MetadataResourceConfig { return MetadataResourceConfig{} diff --git a/pkg/networkdevice/profile/profiledefinition/metadata_test.go b/pkg/networkdevice/profile/profiledefinition/metadata_test.go new file mode 100644 index 00000000000000..0d0661acc8eac8 --- /dev/null +++ b/pkg/networkdevice/profile/profiledefinition/metadata_test.go @@ -0,0 +1,90 @@ +// Unless explicitly stated otherwise all files in this 
repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package profiledefinition + +import ( + "github.com/stretchr/testify/assert" + "regexp" + "testing" +) + +func makeMetadata() MetadataConfig { + // This is not actually a valid config, since e.g. it has ExtractValue and + // MatchPattern both set; this is just to check that every field gets copied + // properly. + return MetadataConfig{ + "device": MetadataResourceConfig{ + Fields: map[string]MetadataField{ + "name": { + Value: "hey", + Symbol: SymbolConfig{ + OID: "1.2.3", + Name: "someSymbol", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + MatchPattern: ".*", + MatchPatternCompiled: regexp.MustCompile(".*"), + MatchValue: "$1", + ScaleFactor: 100, + Format: "mac_address", + ConstantValueOne: true, + MetricType: "gauge", + }, + }, + }, + IDTags: []MetricTagConfig{ + { + Tag: "foo", + Index: 1, + Column: SymbolConfig{ + Name: "bar", + OID: "1.2.3", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + OID: "2.3.4", + Symbol: SymbolConfigCompat{ + OID: "1.2.3", + Name: "someSymbol", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + IndexTransform: []MetricIndexTransform{ + { + Start: 1, + End: 5, + }, + }, + Mapping: map[string]string{ + "1": "on", + "2": "off", + }, + Match: ".*", + Pattern: regexp.MustCompile(".*"), + Tags: map[string]string{ + "foo": "bar", + }, + SymbolTag: "ok", + }, + }, + }, + } +} + +func TestCloneMetadata(t *testing.T) { + metadata := makeMetadata() + metaCopy := metadata.Clone() + assert.Equal(t, metadata, metaCopy) + // Modify the copy in place + metaCopy["interface"] = MetadataResourceConfig{} + metaCopy["device"].Fields["foo"] = MetadataField{ + Value: "foo", + } + // Original has not changed + assert.Equal(t, makeMetadata(), metadata) + // New one is different + 
assert.NotEqual(t, metadata, metaCopy) +} diff --git a/pkg/networkdevice/profile/profiledefinition/metrics.go b/pkg/networkdevice/profile/profiledefinition/metrics.go index c29857144d5827..de4c8a0d691700 100644 --- a/pkg/networkdevice/profile/profiledefinition/metrics.go +++ b/pkg/networkdevice/profile/profiledefinition/metrics.go @@ -6,7 +6,9 @@ package profiledefinition import ( + "maps" "regexp" + "slices" ) // ProfileMetricType metric type used to override default type of the metric @@ -51,6 +53,11 @@ const ( // When this happens, in ValidateEnrichMetricTags we harmonize by moving MetricTagConfig.OID to MetricTagConfig.Symbol.OID. type SymbolConfigCompat SymbolConfig +// Clone creates a duplicate of this SymbolConfigCompat +func (s SymbolConfigCompat) Clone() SymbolConfigCompat { + return SymbolConfigCompat(SymbolConfig(s).Clone()) +} + // SymbolConfig holds info for a single symbol/oid type SymbolConfig struct { OID string `yaml:"OID,omitempty" json:"OID,omitempty"` @@ -75,6 +82,14 @@ type SymbolConfig struct { MetricType ProfileMetricType `yaml:"metric_type,omitempty" json:"metric_type,omitempty"` } +// Clone creates a duplicate of this SymbolConfig +func (s SymbolConfig) Clone() SymbolConfig { + // SymbolConfig has no mutable members, so simple assignment copies it. + // (technically this is false - regexes in go are mutable SOLELY through the + // .Longest() method. 
But we never use that, so we ignore it here) + return s +} + // MetricTagConfig holds metric tag info type MetricTagConfig struct { Tag string `yaml:"tag" json:"tag"` @@ -106,6 +121,18 @@ type MetricTagConfig struct { SymbolTag string `yaml:"-" json:"-"` } +// Clone duplicates this MetricTagConfig +func (m MetricTagConfig) Clone() MetricTagConfig { + m2 := m // non-pointer assignment shallow-copies members + // deep copy symbols and structures + m2.Column = m.Column.Clone() + m2.Symbol = m.Symbol.Clone() + m2.IndexTransform = slices.Clone(m.IndexTransform) + m2.Mapping = maps.Clone(m.Mapping) + m2.Tags = maps.Clone(m.Tags) + return m2 +} + // MetricTagConfigList holds configs for a list of metric tags type MetricTagConfigList []MetricTagConfig @@ -151,6 +178,23 @@ type MetricsConfig struct { Options MetricsConfigOption `yaml:"options,omitempty" json:"options,omitempty"` } +// Clone duplicates this MetricsConfig +func (m MetricsConfig) Clone() MetricsConfig { + return MetricsConfig{ + MIB: m.MIB, + Table: m.Table.Clone(), + Symbol: m.Symbol.Clone(), + OID: m.OID, + Name: m.Name, + Symbols: CloneSlice(m.Symbols), + StaticTags: slices.Clone(m.StaticTags), + MetricTags: CloneSlice(m.MetricTags), + ForcedType: m.ForcedType, + MetricType: m.MetricType, + Options: m.Options, + } +} + // GetSymbolTags returns symbol tags func (m *MetricsConfig) GetSymbolTags() []string { var symbolTags []string diff --git a/pkg/networkdevice/profile/profiledefinition/metrics_test.go b/pkg/networkdevice/profile/profiledefinition/metrics_test.go new file mode 100644 index 00000000000000..0ab7a86d8ed0a8 --- /dev/null +++ b/pkg/networkdevice/profile/profiledefinition/metrics_test.go @@ -0,0 +1,179 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +package profiledefinition + +import ( + "github.com/stretchr/testify/assert" + "regexp" + "testing" +) + +func TestCloneSymbolConfig(t *testing.T) { + s := SymbolConfig{ + OID: "1.2.3.4", + Name: "foo", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + MatchPattern: ".*", + MatchPatternCompiled: regexp.MustCompile(".*"), + MatchValue: "$1", + ScaleFactor: 100, + Format: "mac_address", + ConstantValueOne: true, + MetricType: ProfileMetricTypeCounter, + } + s2 := s.Clone() + assert.Equal(t, s, s2) + // An issue with our previous deepcopy was that regexes were duplicated but + // didn't keep their private internals, causing them to fail when used, so + // double-check that the regex works fine. + assert.True(t, s2.ExtractValueCompiled.MatchString("foo")) +} + +func TestCloneSymbolConfigCompat(t *testing.T) { + s := SymbolConfigCompat{ + OID: "1.2.3.4", + Name: "foo", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + MatchPattern: ".*", + MatchPatternCompiled: regexp.MustCompile(".*"), + MatchValue: "$1", + ScaleFactor: 100, + Format: "mac_address", + ConstantValueOne: true, + MetricType: ProfileMetricTypeCounter, + } + s2 := s.Clone() + assert.Equal(t, s, s2) +} + +func TestCloneMetricTagConfig(t *testing.T) { + c := MetricTagConfig{ + Tag: "foo", + Index: 10, + Column: SymbolConfig{ + OID: "1.2.3.4", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + OID: "2.4", + Symbol: SymbolConfigCompat{}, + IndexTransform: []MetricIndexTransform{ + { + Start: 0, + End: 1, + }, + }, + Mapping: map[string]string{ + "1": "bar", + "2": "baz", + }, + Match: ".*", + Pattern: regexp.MustCompile(".*"), + Tags: map[string]string{ + "foo": "$1", + }, + SymbolTag: "baz", + } + c2 := c.Clone() + assert.Equal(t, c, c2) + c2.Tags["bar"] = "$2" + c2.IndexTransform = append(c2.IndexTransform, MetricIndexTransform{1, 3}) + c2.Mapping["3"] = "foo" + c2.Tag = "bar" + assert.NotEqual(t, c, c2) + // Validate that c has 
not changed + assert.Equal(t, c, MetricTagConfig{ + Tag: "foo", + Index: 10, + Column: SymbolConfig{ + OID: "1.2.3.4", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + OID: "2.4", + Symbol: SymbolConfigCompat{}, + IndexTransform: []MetricIndexTransform{ + { + Start: 0, + End: 1, + }, + }, + Mapping: map[string]string{ + "1": "bar", + "2": "baz", + }, + Match: ".*", + Pattern: regexp.MustCompile(".*"), + Tags: map[string]string{ + "foo": "$1", + }, + SymbolTag: "baz", + }) +} + +func TestCloneMetricsConfig(t *testing.T) { + buildConf := func() MetricsConfig { + return MetricsConfig{ + MIB: "FOO-MIB", + Table: SymbolConfig{ + OID: "1.2.3.4", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + Symbol: SymbolConfig{ + OID: "1.2.3.4", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + OID: "1.2.3.4", + Name: "foo", + Symbols: []SymbolConfig{ + { + OID: "1.2.3.4", + ExtractValue: ".*", + ExtractValueCompiled: regexp.MustCompile(".*"), + }, + }, + StaticTags: []string{ + "foo", + "bar", + }, + MetricTags: []MetricTagConfig{ + { + IndexTransform: make([]MetricIndexTransform, 0), + }, + }, + ForcedType: ProfileMetricTypeCounter, + MetricType: ProfileMetricTypeGauge, + Options: MetricsConfigOption{ + Placement: 1, + MetricSuffix: ".foo", + }, + } + } + conf := buildConf() + unchanged := buildConf() + + conf2 := conf.Clone() + assert.Equal(t, conf, conf2) + conf2.StaticTags[0] = "baz" + conf2.MetricTags[0].IndexTransform = []MetricIndexTransform{{5, 7}} + conf2.Options.Placement = 2 + conf2.Options.MetricSuffix = ".bar" + assert.Equal(t, unchanged, conf) + assert.NotEqual(t, conf, conf2) +} + +func TestCloneEmpty(t *testing.T) { + mc := MetricsConfig{} + assert.Equal(t, mc, mc.Clone()) + sym := SymbolConfig{} + assert.Equal(t, sym, sym.Clone()) + tag := MetricTagConfig{} + assert.Equal(t, tag, tag.Clone()) +} diff --git a/pkg/networkdevice/profile/profiledefinition/profile_definition.go 
b/pkg/networkdevice/profile/profiledefinition/profile_definition.go index a87704ec84ce76..1b20e1eb8899c5 100644 --- a/pkg/networkdevice/profile/profiledefinition/profile_definition.go +++ b/pkg/networkdevice/profile/profiledefinition/profile_definition.go @@ -5,6 +5,8 @@ package profiledefinition +import "slices" + // DeviceMeta holds device related static metadata // DEPRECATED in favour of profile metadata syntax type DeviceMeta struct { @@ -54,3 +56,24 @@ func (p *ProfileDefinition) SplitOIDs(includeMetadata bool) ([]string, []string) } return splitOIDs(p.Metrics, p.MetricTags, nil) } + +// Clone duplicates this ProfileDefinition +func (p *ProfileDefinition) Clone() *ProfileDefinition { + if p == nil { + return nil + } + return &ProfileDefinition{ + Name: p.Name, + Description: p.Description, + SysObjectIDs: slices.Clone(p.SysObjectIDs), + Extends: slices.Clone(p.Extends), + Metadata: CloneMap(p.Metadata), + MetricTags: CloneSlice(p.MetricTags), + StaticTags: slices.Clone(p.StaticTags), + Metrics: CloneSlice(p.Metrics), + Device: DeviceMeta{ + Vendor: p.Device.Vendor, + }, + Version: p.Version, + } +} From 9060ad63afc500d70efd75c94989cece7573cdb5 Mon Sep 17 00:00:00 2001 From: Stuart Geipel Date: Wed, 29 Jan 2025 16:01:42 -0500 Subject: [PATCH 62/97] [NPM-4140] Fix test failures that appear in KMT (#33378) --- pkg/network/tracer/tracer_linux_test.go | 71 ++++++++++++++++--------- pkg/network/tracer/tracer_test.go | 14 ++--- 2 files changed, 53 insertions(+), 32 deletions(-) diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index bcd9ad785c7ff3..af8cd8f8dd0ec8 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -181,11 +181,8 @@ func (s *TracerSuite) TestTCPRetransmit() { // Iterate through active connections until we find connection created above, and confirm send + recv counts connections := getConnections(ct, tr) - ok := false - conn, ok = 
findConnection(c.LocalAddr(), c.RemoteAddr(), connections) - if !assert.True(ct, ok) { - return - } + conn, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) + require.NotNil(ct, conn) assert.Equal(ct, 100*clientMessageSize, int(conn.Monotonic.SentBytes)) assert.Equal(ct, serverMessageSize, int(conn.Monotonic.RecvBytes)) @@ -1292,13 +1289,14 @@ func testUDPPeekCount(t *testing.T, udpnet, ip string) { var outgoing *network.ConnectionStats require.EventuallyWithTf(t, func(collect *assert.CollectT) { conns := getConnections(collect, tr) - if outgoing == nil { - outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + newOutgoing, _ := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + if newOutgoing != nil { + outgoing = newOutgoing } - if incoming == nil { - incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns) + newIncoming, _ := findConnection(c.RemoteAddr(), c.LocalAddr(), conns) + if newIncoming != nil { + incoming = newIncoming } - require.NotNil(collect, outgoing) require.NotNil(collect, incoming) }, 3*time.Second, 100*time.Millisecond, "couldn't find incoming and outgoing connections matching") @@ -1359,11 +1357,13 @@ func testUDPPacketSumming(t *testing.T, udpnet, ip string) { var outgoing *network.ConnectionStats require.EventuallyWithTf(t, func(collect *assert.CollectT) { conns := getConnections(collect, tr) - if outgoing == nil { - outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + newOutgoing, _ := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + if newOutgoing != nil { + outgoing = newOutgoing } - if incoming == nil { - incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns) + newIncoming, _ := findConnection(c.RemoteAddr(), c.LocalAddr(), conns) + if newIncoming != nil { + incoming = newIncoming } require.NotNil(collect, outgoing) @@ -1676,11 +1676,13 @@ func (s *TracerSuite) TestSendfileRegression() { assert.EventuallyWithT(t, func(ct *assert.CollectT) { conns := 
getConnections(ct, tr) t.Log(conns) - if outConn == nil { - outConn = network.FirstConnection(conns, network.ByType(connType), network.ByFamily(family), network.ByTuple(c.LocalAddr(), c.RemoteAddr())) + newOutConn := network.FirstConnection(conns, network.ByType(connType), network.ByFamily(family), network.ByTuple(c.LocalAddr(), c.RemoteAddr())) + if newOutConn != nil { + outConn = newOutConn } - if inConn == nil { - inConn = network.FirstConnection(conns, network.ByType(connType), network.ByFamily(family), network.ByTuple(c.RemoteAddr(), c.LocalAddr())) + newInConn := network.FirstConnection(conns, network.ByType(connType), network.ByFamily(family), network.ByTuple(c.RemoteAddr(), c.LocalAddr())) + if newInConn != nil { + inConn = newInConn } require.NotNil(ct, outConn) require.NotNil(ct, inConn) @@ -1824,18 +1826,27 @@ func (s *TracerSuite) TestShortWrite() { t := s.T() tr := setupTracer(t, testConfig()) + exit := make(chan struct{}) read := make(chan struct{}) + server := tracertestutil.NewTCPServer(func(c net.Conn) { // set recv buffer to 0 and don't read // to fill up tcp window err := c.(*net.TCPConn).SetReadBuffer(0) require.NoError(t, err) - <-read + select { + case <-read: + case <-exit: + } + + _, err = io.Copy(io.Discard, c) + require.NoError(t, err) + c.Close() }) require.NoError(t, server.Run()) t.Cleanup(func() { - close(read) + close(exit) server.Shutdown() }) @@ -1890,14 +1901,19 @@ func (s *TracerSuite) TestShortWrite() { require.True(t, done) f := os.NewFile(uintptr(sk), "") - defer f.Close() c, err := net.FileConn(f) require.NoError(t, err) + t.Cleanup(func() { c.Close() }) + + unix.Shutdown(sk, unix.SHUT_WR) + close(read) + unix.Close(sk) require.EventuallyWithT(t, func(collect *assert.CollectT) { conns := getConnections(collect, tr) conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) require.True(collect, ok) + require.Equal(collect, sent, conn.Monotonic.SentBytes) }, 3*time.Second, 100*time.Millisecond, "couldn't find connection 
used by short write") } @@ -2029,12 +2045,15 @@ func (s *TracerSuite) TestPreexistingConnectionDirection() { var incoming, outgoing *network.ConnectionStats require.EventuallyWithT(t, func(collect *assert.CollectT) { connections := getConnections(collect, tr) - if outgoing == nil { - outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) + newOutgoing, _ := findConnection(c.LocalAddr(), c.RemoteAddr(), connections) + if newOutgoing != nil { + outgoing = newOutgoing } - if incoming == nil { - incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), connections) + newIncoming, _ := findConnection(c.RemoteAddr(), c.LocalAddr(), connections) + if newIncoming != nil { + incoming = newIncoming } + require.NotNil(collect, outgoing) require.NotNil(collect, incoming) if !assert.True(collect, incoming != nil && outgoing != nil) { @@ -2322,7 +2341,7 @@ func TestConntrackerFallback(t *testing.T) { func testConfig() *config.Config { cfg := config.New() - if env.IsECSFargate() { + if env.IsECSFargate() || cfg.EnableEbpfless { // protocol classification not yet supported on fargate cfg.ProtocolClassificationEnabled = false } diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index bc45fe5cb12f30..b0cab2a8ebd253 100644 --- a/pkg/network/tracer/tracer_test.go +++ b/pkg/network/tracer/tracer_test.go @@ -402,13 +402,15 @@ func (s *TracerSuite) TestTCPConnsReported() { // Test connections := getConnections(collect, tr) - if forward == nil { - // Server-side - forward, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), connections) + // Server-side + newForward, _ := findConnection(c.RemoteAddr(), c.LocalAddr(), connections) + if newForward != nil { + forward = newForward } - if reverse == nil { - // Client-side - reverse, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) + // Client-side + newReverse, _ := findConnection(c.LocalAddr(), c.RemoteAddr(), connections) + if newReverse != nil { + reverse = newReverse } 
require.NotNil(collect, forward) From 996dd54337908a6511948fabd2a41420ba919a8b Mon Sep 17 00:00:00 2001 From: Jeremy Hanna Date: Wed, 29 Jan 2025 15:38:18 -0600 Subject: [PATCH 63/97] Fix windows status e2e assertion with wording change (#33555) --- test/new-e2e/tests/windows/fips-test/fips_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/new-e2e/tests/windows/fips-test/fips_test.go b/test/new-e2e/tests/windows/fips-test/fips_test.go index 8419f43be312e9..afbda5cc51594b 100644 --- a/test/new-e2e/tests/windows/fips-test/fips_test.go +++ b/test/new-e2e/tests/windows/fips-test/fips_test.go @@ -80,7 +80,7 @@ func (s *fipsAgentSuite) TestWithSystemFIPSDisabled() { s.Run("gofips disabled", func() { status, err := s.execAgentCommand("status") require.NoError(s.T(), err) - assert.Contains(s.T(), status, "FIPS compliant: false") + assert.Contains(s.T(), status, "FIPS Mode: disabled") }) }) } @@ -105,13 +105,13 @@ func (s *fipsAgentSuite) TestWithSystemFIPSEnabled() { s.Run("gofips enabled", func() { status, err := s.execAgentCommand("status") require.NoError(s.T(), err) - assert.Contains(s.T(), status, "FIPS compliant: true") + assert.Contains(s.T(), status, "FIPS Mode: enabled") }) s.Run("gofips disabled", func() { status, err := s.execAgentCommand("status") require.NoError(s.T(), err) - assert.Contains(s.T(), status, "FIPS compliant: true") + assert.Contains(s.T(), status, "FIPS Mode: enabled") }) }) } From 091aa0c56e10f287a13cb570767f92d278eb96bd Mon Sep 17 00:00:00 2001 From: Dan Lepage <140522866+dplepage-dd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:34:14 -0500 Subject: [PATCH 64/97] [NDM] Fix profile JSON fields (#32376) --- .../snmp/internal/checkconfig/buildprofile.go | 1 + .../internal/profile/profile_initconfig.go | 6 +-- .../snmp/internal/profile/profile_test.go | 10 +---- .../profile/profiledefinition/metrics.go | 10 ++--- .../profiledefinition/profile_definition.go | 4 +- .../schema/profile_rc_schema.json | 39 
+++++++++++++++---- 6 files changed, 44 insertions(+), 26 deletions(-) diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go b/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go index 7da0734e7d2403..b01f6ebce04283 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/buildprofile.go @@ -68,6 +68,7 @@ func (c *CheckConfig) BuildProfile(sysObjectID string) (profiledefinition.Profil profile.Metadata = maps.Clone(rootProfile.Metadata) profile.Metrics = append(profile.Metrics, rootProfile.Metrics...) profile.MetricTags = append(profile.MetricTags, rootProfile.MetricTags...) + profile.Device.Vendor = rootProfile.Device.Vendor } profile.Metadata = updateMetadataDefinitionWithDefaults(profile.Metadata, c.CollectTopology) diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go b/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go index 700b6a75ffa0af..c0ef2f7c902659 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go @@ -17,11 +17,11 @@ func loadInitConfigProfiles(rawInitConfigProfiles ProfileConfigMap) (ProfileConf log.Warnf("unable to load profile %q: %s", name, err) continue } - if profDefinition.Name == "" { - profDefinition.Name = name - } profConfig.Definition = *profDefinition } + if profConfig.Definition.Name == "" { + profConfig.Definition.Name = name + } initConfigProfiles[name] = profConfig } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go index f2c402e8864274..81bdf282667d95 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go @@ -31,14 +31,9 @@ func Test_loadProfiles(t *testing.T) { name: "OK Use init config 
profiles", mockConfd: "conf.d", profiles: ProfileConfigMap{ - "my-init-config-profile": ProfileConfig{ - Definition: profiledefinition.ProfileDefinition{ - Name: "my-init-config-profile", - }, - }, + "my-init-config-profile": ProfileConfig{}, "f5-big-ip": ProfileConfig{ // should have precedence over user profiles Definition: profiledefinition.ProfileDefinition{ - Name: "f5-big-ip", Metrics: []profiledefinition.MetricsConfig{ { Symbol: profiledefinition.SymbolConfig{ @@ -64,7 +59,6 @@ func Test_loadProfiles(t *testing.T) { profiles: ProfileConfigMap{ "my-init-config-profile": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ - Name: "my-init-config-profile", MetricTags: profiledefinition.MetricTagConfigList{ { Match: "invalidRegex({[", @@ -102,7 +96,7 @@ func Test_loadProfiles(t *testing.T) { } var actualProfilesNames []string for profileName := range actualProfiles { - actualProfilesNames = append(actualProfilesNames, profileName) + actualProfilesNames = append(actualProfilesNames, actualProfiles[profileName].Definition.Name) } sort.Strings(actualProfilesNames) sort.Strings(tt.expectedProfileNames) diff --git a/pkg/networkdevice/profile/profiledefinition/metrics.go b/pkg/networkdevice/profile/profiledefinition/metrics.go index de4c8a0d691700..33902300f0ed8d 100644 --- a/pkg/networkdevice/profile/profiledefinition/metrics.go +++ b/pkg/networkdevice/profile/profiledefinition/metrics.go @@ -66,9 +66,8 @@ type SymbolConfig struct { ExtractValue string `yaml:"extract_value,omitempty" json:"extract_value,omitempty"` ExtractValueCompiled *regexp.Regexp `yaml:"-" json:"-"` - // MatchPattern/MatchValue are not exposed as json (UI) since ExtractValue can be used instead - MatchPattern string `yaml:"match_pattern,omitempty" json:"-"` - MatchValue string `yaml:"match_value,omitempty" json:"-"` + MatchPattern string `yaml:"match_pattern,omitempty" json:"match_pattern,omitempty"` + MatchValue string `yaml:"match_value,omitempty" json:"match_value,omitempty"` 
MatchPatternCompiled *regexp.Regexp `yaml:"-" json:"-"` ScaleFactor float64 `yaml:"scale_factor,omitempty" json:"scale_factor,omitempty"` @@ -171,9 +170,10 @@ type MetricsConfig struct { StaticTags []string `yaml:"static_tags,omitempty" json:"-"` MetricTags MetricTagConfigList `yaml:"metric_tags,omitempty" json:"metric_tags,omitempty"` - // DEPRECATED: use MetricType instead. + // DEPRECATED: use Symbol.MetricType instead. ForcedType ProfileMetricType `yaml:"forced_type,omitempty" json:"forced_type,omitempty" jsonschema:"-"` - MetricType ProfileMetricType `yaml:"metric_type,omitempty" json:"metric_type,omitempty"` + // DEPRECATED: use Symbol.MetricType instead. + MetricType ProfileMetricType `yaml:"metric_type,omitempty" json:"metric_type,omitempty" jsonschema:"-"` Options MetricsConfigOption `yaml:"options,omitempty" json:"options,omitempty"` } diff --git a/pkg/networkdevice/profile/profiledefinition/profile_definition.go b/pkg/networkdevice/profile/profiledefinition/profile_definition.go index 1b20e1eb8899c5..1d0e103ca37a14 100644 --- a/pkg/networkdevice/profile/profiledefinition/profile_definition.go +++ b/pkg/networkdevice/profile/profiledefinition/profile_definition.go @@ -20,7 +20,7 @@ type DeviceMeta struct { // 2/ Datadog backend: the profiles are in json format, they are used to store profiles created via UI. // The serialisation of json profiles are defined by the json annotation. type ProfileDefinition struct { - Name string `yaml:"name" json:"name"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` Description string `yaml:"description,omitempty" json:"description,omitempty"` SysObjectIDs StringArray `yaml:"sysobjectid,omitempty" json:"sysobjectid,omitempty"` Extends []string `yaml:"extends,omitempty" json:"extends,omitempty"` @@ -34,7 +34,7 @@ type ProfileDefinition struct { // Version is the profile version. // It is currently used only with downloaded/RC profiles. 
- Version uint64 `yaml:"version,omitempty" json:"version"` + Version uint64 `yaml:"version,omitempty" json:"version,omitempty"` } // DeviceProfileRcConfig represent the profile stored in remote config. diff --git a/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json b/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json index cbee775db45b2e..9e027f37575dba 100644 --- a/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json +++ b/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json @@ -70,6 +70,12 @@ "extract_value": { "type": "string" }, + "match_pattern": { + "type": "string" + }, + "match_value": { + "type": "string" + }, "scale_factor": { "type": "number" }, @@ -98,6 +104,12 @@ "extract_value": { "type": "string" }, + "match_pattern": { + "type": "string" + }, + "match_value": { + "type": "string" + }, "scale_factor": { "type": "number" }, @@ -153,6 +165,12 @@ "extract_value": { "type": "string" }, + "match_pattern": { + "type": "string" + }, + "match_value": { + "type": "string" + }, "scale_factor": { "type": "number" }, @@ -302,9 +320,6 @@ "metric_tags": { "$ref": "#/$defs/MetricTagConfigList" }, - "metric_type": { - "type": "string" - }, "options": { "$ref": "#/$defs/MetricsConfigOption" } @@ -370,11 +385,7 @@ } }, "additionalProperties": false, - "type": "object", - "required": [ - "name", - "version" - ] + "type": "object" }, "StringArray": { "items": { @@ -393,6 +404,12 @@ "extract_value": { "type": "string" }, + "match_pattern": { + "type": "string" + }, + "match_value": { + "type": "string" + }, "scale_factor": { "type": "number" }, @@ -420,6 +437,12 @@ "extract_value": { "type": "string" }, + "match_pattern": { + "type": "string" + }, + "match_value": { + "type": "string" + }, "scale_factor": { "type": "number" }, From db518f4b0da801580e578eea3c42894a82b2ed1a Mon Sep 17 00:00:00 2001 From: sabrina lu Date: Wed, 29 Jan 2025 17:50:59 -0500 Subject: [PATCH 65/97] add github 
workflow to bump integrations core (#33455) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .github/workflows/bump_integrations_core.yml | 40 ++++++++++++++ tasks/libs/ciproviders/github_api.py | 24 +++++--- tasks/libs/common/git.py | 16 ++++-- tasks/libs/releasing/json.py | 3 +- tasks/release.py | 58 +++++++++++++++++++- 5 files changed, 127 insertions(+), 14 deletions(-) create mode 100644 .github/workflows/bump_integrations_core.yml diff --git a/.github/workflows/bump_integrations_core.yml b/.github/workflows/bump_integrations_core.yml new file mode 100644 index 00000000000000..c8880e76d53f06 --- /dev/null +++ b/.github/workflows/bump_integrations_core.yml @@ -0,0 +1,40 @@ +name: Bump Integrations Core + +on: + workflow_dispatch: + schedule: + - cron: '0 4 * * 1,3' # Run on Monday, Wednesday at 4:00 UTC +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +permissions: {} + +jobs: + bump_integrations_core: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - name: Checkout the main branch + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + persist-credentials: true + + - name: Install Python + uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 + with: + python-version-file: .python-version + cache: "pip" + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install -r tasks/libs/requirements-github.txt + pip install -r tasks/requirements_release_tasks.txt + + - name: Bump Integrations Core + run: | + inv release.bump-integrations-core --slack-webhook ${{ secrets.BUMP_INTEGRATIONS_CORE_SLACK_WEBHOOK }} + diff --git a/tasks/libs/ciproviders/github_api.py b/tasks/libs/ciproviders/github_api.py index 52f9055606c7e9..9b6ecca099daa1 100644 --- a/tasks/libs/ciproviders/github_api.py +++ b/tasks/libs/ciproviders/github_api.py @@ -641,14 
+641,11 @@ def get_user_query(login): return query + string_var -def create_release_pr(title, base_branch, target_branch, version, changelog_pr=False, milestone=None): +def create_datadog_agent_pr(title, base_branch, target_branch, milestone_name, other_labels=None): print(color_message("Creating PR", "bold")) github = GithubAPI(repository=GITHUB_REPO_NAME) - # Find milestone based on what the next final version is. If the milestone does not exist, fail. - milestone_name = milestone or str(version) - milestone = github.get_milestone_by_name(milestone_name) if not milestone or not milestone.number: @@ -679,12 +676,10 @@ def create_release_pr(title, base_branch, target_branch, version, changelog_pr=F labels = [ "changelog/no-changelog", "qa/no-code-change", - "team/agent-delivery", - "team/agent-release-management", ] - if changelog_pr: - labels.append(f"backport/{get_default_branch()}") + if other_labels: + labels += other_labels updated_pr = github.update_pr( pull_number=pr.number, @@ -704,6 +699,19 @@ def create_release_pr(title, base_branch, target_branch, version, changelog_pr=F return updated_pr.html_url +def create_release_pr(title, base_branch, target_branch, version, changelog_pr=False, milestone=None): + milestone_name = milestone or str(version) + + labels = [ + "team/agent-delivery", + "team/agent-release-management", + ] + if changelog_pr: + labels.append(f"backport/{get_default_branch()}") + + return create_datadog_agent_pr(title, base_branch, target_branch, milestone_name, labels) + + def ask_review_actor(pr): for event in pr.get_issue_events(): if event.event == "labeled" and event.label.name == "ask-review": diff --git a/tasks/libs/common/git.py b/tasks/libs/common/git.py index bf6383b103c7fb..58223257064874 100644 --- a/tasks/libs/common/git.py +++ b/tasks/libs/common/git.py @@ -254,16 +254,24 @@ def get_last_commit(ctx, repo, branch): ) +def get_git_references(ctx, repo, ref, tags=False): + """ + Fetches a specific reference (ex: branch, tag, or 
HEAD) from a remote Git repository + """ + filter_by = " -t" if tags else "" + return ctx.run( + rf'git ls-remote{filter_by} https://github.com/DataDog/{repo} "{ref}"', + hide=True, + ).stdout.strip() + + def get_last_release_tag(ctx, repo, pattern): import re from functools import cmp_to_key import semver - tags = ctx.run( - rf'git ls-remote -t https://github.com/DataDog/{repo} "{pattern}"', - hide=True, - ).stdout.strip() + tags = get_git_references(ctx, repo, pattern, tags=True) if not tags: raise Exit( color_message( diff --git a/tasks/libs/releasing/json.py b/tasks/libs/releasing/json.py index 91e68678a8b02e..59cad2cc12ae4b 100644 --- a/tasks/libs/releasing/json.py +++ b/tasks/libs/releasing/json.py @@ -24,8 +24,9 @@ # The order matters, eg. when fetching matching tags for an Agent 6 entry, # tags starting with 6 will be preferred to tags starting with 7. COMPATIBLE_MAJOR_VERSIONS = {6: ["6", "7"], 7: ["7"]} +INTEGRATIONS_CORE_JSON_FIELD = "INTEGRATIONS_CORE_VERSION" RELEASE_JSON_FIELDS_TO_UPDATE = [ - "INTEGRATIONS_CORE_VERSION", + INTEGRATIONS_CORE_JSON_FIELD, "OMNIBUS_SOFTWARE_VERSION", "OMNIBUS_RUBY_VERSION", "MACOS_BUILD_VERSION", diff --git a/tasks/release.py b/tasks/release.py index 43ff33a6f2d244..9de2f6f3040ebf 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -20,7 +20,7 @@ from invoke import Failure, task from invoke.exceptions import Exit -from tasks.libs.ciproviders.github_api import GithubAPI, create_release_pr +from tasks.libs.ciproviders.github_api import GithubAPI, create_datadog_agent_pr, create_release_pr from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo from tasks.libs.common.color import Color, color_message from tasks.libs.common.constants import ( @@ -32,6 +32,7 @@ check_clean_branch_state, clone, get_default_branch, + get_git_references, get_last_commit, get_last_release_tag, is_agent6, @@ -56,6 +57,7 @@ from tasks.libs.releasing.json import ( DEFAULT_BRANCHES, DEFAULT_BRANCHES_AGENT6, + INTEGRATIONS_CORE_JSON_FIELD, 
UNFREEZE_REPOS, _get_release_json_value, _save_release_json, @@ -1337,3 +1339,57 @@ def check_previous_agent6_rc(ctx): payload = {'message': err_msg} send_slack_msg(ctx, payload, os.environ.get("SLACK_DATADOG_AGENT_CI_WEBHOOK")) raise Exit(message=err_msg, code=1) + + +@task +def bump_integrations_core(ctx, slack_webhook=None): + """ + Create a PR to bump the integrations core fields in the release.json file + """ + if os.environ.get("GITHUB_ACTIONS"): + set_git_config('user.name', 'github-actions[bot]') + set_git_config('user.email', 'github-actions[bot]@users.noreply.github.com') + + commit_hash = get_git_references(ctx, "integrations-core", "HEAD").split()[0] + + rj = load_release_json() + + for nightly in ["nightly", "nightly-a7"]: + rj[nightly][INTEGRATIONS_CORE_JSON_FIELD] = commit_hash + + _save_release_json(rj) + + main_branch = "main" + bump_integrations_core_branch = f"bump-integrations-core-{int(time.time())}" + ctx.run(f"git checkout -b {bump_integrations_core_branch}") + ctx.run("git add release.json") + + commit_message = "Update integrations core to HEAD" + ok = try_git_command(ctx, f"git commit -m '{commit_message}'") + if not ok: + raise Exit( + color_message( + f"Could not create commit. Please commit manually with:\ngit commit -m {commit_message}\n, push the {bump_integrations_core_branch} branch and then open a PR against {main_branch}.", + "red", + ), + code=1, + ) + + if not ctx.run(f"git push --set-upstream origin {bump_integrations_core_branch}", warn=True): + raise Exit( + color_message( + f"Could not push branch {bump_integrations_core_branch} to the upstream 'origin'. 
Please push it manually and then open a PR against {main_branch}.", + "red", + ), + code=1, + ) + + # to find the correct current milestone 'devel' is set to False even though this will only run on development branches + current = current_version(ctx, 7) + current.rc = False + current.devel = False + pr_url = create_datadog_agent_pr(commit_message, main_branch, bump_integrations_core_branch, str(current)) + + if slack_webhook: + payload = {'pr_url': pr_url} + send_slack_msg(ctx, payload, slack_webhook) From ff5ef16b8bf9208a4d094b76ff75f6e0cb160d9d Mon Sep 17 00:00:00 2001 From: Amit Slavin <108348428+amitslavin@users.noreply.github.com> Date: Thu, 30 Jan 2025 11:34:15 +0200 Subject: [PATCH 66/97] [USM] Change http2 error log level to warn (#33571) --- pkg/network/protocols/http2/model_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/network/protocols/http2/model_linux.go b/pkg/network/protocols/http2/model_linux.go index f5675cd8372eb1..93303e323a2ac5 100644 --- a/pkg/network/protocols/http2/model_linux.go +++ b/pkg/network/protocols/http2/model_linux.go @@ -216,7 +216,7 @@ func (tx *EbpfTx) Method() http.Method { // if the length of the method is greater than the buffer, then we return 0. 
if int(tx.Stream.Request_method.Length) > len(tx.Stream.Request_method.Raw_buffer) || tx.Stream.Request_method.Length == 0 { if oversizedLogLimit.ShouldLog() { - log.Errorf("method length %d is longer than the size buffer: %v and is huffman encoded: %v", + log.Warnf("method length %d is longer than the size buffer: %v and is huffman encoded: %v", tx.Stream.Request_method.Length, tx.Stream.Request_method.Raw_buffer, tx.Stream.Request_method.Is_huffman_encoded) } return http.MethodUnknown From 5675c7e5722303ba648c550f4c8a5dc16b814cfa Mon Sep 17 00:00:00 2001 From: Wassim Dhif Date: Thu, 30 Jan 2025 11:21:14 +0100 Subject: [PATCH 67/97] feat(dogstatsd): add cardinality common field (#32917) Signed-off-by: Wassim DHIF --- comp/dogstatsd/server/enrich.go | 11 +++--- comp/dogstatsd/server/enrich_bench_test.go | 2 +- comp/dogstatsd/server/enrich_test.go | 39 ++++++++++++++++++- comp/dogstatsd/server/parse.go | 8 ++++ comp/dogstatsd/server/parse_events.go | 4 ++ comp/dogstatsd/server/parse_metrics.go | 2 + comp/dogstatsd/server/parse_service_checks.go | 4 ++ ...sd_cardinality_field-564b3d846700335c.yaml | 12 ++++++ 8 files changed, 74 insertions(+), 8 deletions(-) create mode 100644 releasenotes/notes/dogstatsd_cardinality_field-564b3d846700335c.yaml diff --git a/comp/dogstatsd/server/enrich.go b/comp/dogstatsd/server/enrich.go index 3ff1fdb26e61d1..f7212f7cef9ee7 100644 --- a/comp/dogstatsd/server/enrich.go +++ b/comp/dogstatsd/server/enrich.go @@ -37,7 +37,7 @@ type enrichConfig struct { } // extractTagsMetadata returns tags (client tags + host tag) and information needed to query tagger (origins, cardinality). 
-func extractTagsMetadata(tags []string, originFromUDS string, processID uint32, localData origindetection.LocalData, externalData origindetection.ExternalData, conf enrichConfig) ([]string, string, taggertypes.OriginInfo, metrics.MetricSource) { +func extractTagsMetadata(tags []string, originFromUDS string, processID uint32, localData origindetection.LocalData, externalData origindetection.ExternalData, cardinality string, conf enrichConfig) ([]string, string, taggertypes.OriginInfo, metrics.MetricSource) { host := conf.defaultHostname metricSource := metrics.MetricSourceDogstatsd @@ -47,6 +47,7 @@ func extractTagsMetadata(tags []string, originFromUDS string, processID uint32, LocalData: localData, ExternalData: externalData, ProductOrigin: origindetection.ProductOriginDogStatsD, + Cardinality: cardinality, } origin.LocalData.ProcessID = processID @@ -58,7 +59,7 @@ func extractTagsMetadata(tags []string, originFromUDS string, processID uint32, } else if strings.HasPrefix(tag, entityIDTagPrefix) { origin.LocalData.PodUID = tag[len(entityIDTagPrefix):] continue - } else if strings.HasPrefix(tag, CardinalityTagPrefix) { + } else if strings.HasPrefix(tag, CardinalityTagPrefix) && origin.Cardinality == "" { origin.Cardinality = tag[len(CardinalityTagPrefix):] continue } else if strings.HasPrefix(tag, jmxCheckNamePrefix) { @@ -115,7 +116,7 @@ func tsToFloatForSamples(ts time.Time) float64 { func enrichMetricSample(dest []metrics.MetricSample, ddSample dogstatsdMetricSample, origin string, processID uint32, listenerID string, conf enrichConfig) []metrics.MetricSample { metricName := ddSample.name - tags, hostnameFromTags, extractedOrigin, metricSource := extractTagsMetadata(ddSample.tags, origin, processID, ddSample.localData, ddSample.externalData, conf) + tags, hostnameFromTags, extractedOrigin, metricSource := extractTagsMetadata(ddSample.tags, origin, processID, ddSample.localData, ddSample.externalData, ddSample.cardinality, conf) if !isExcluded(metricName, 
conf.metricPrefix, conf.metricPrefixBlacklist) { metricName = conf.metricPrefix + metricName @@ -195,7 +196,7 @@ func enrichEventAlertType(dogstatsdAlertType alertType) metricsevent.AlertType { } func enrichEvent(event dogstatsdEvent, origin string, processID uint32, conf enrichConfig) *metricsevent.Event { - tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(event.tags, origin, processID, event.localData, event.externalData, conf) + tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(event.tags, origin, processID, event.localData, event.externalData, event.cardinality, conf) enrichedEvent := &metricsevent.Event{ Title: event.title, @@ -232,7 +233,7 @@ func enrichServiceCheckStatus(status serviceCheckStatus) servicecheck.ServiceChe } func enrichServiceCheck(serviceCheck dogstatsdServiceCheck, origin string, processID uint32, conf enrichConfig) *servicecheck.ServiceCheck { - tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(serviceCheck.tags, origin, processID, serviceCheck.localData, serviceCheck.externalData, conf) + tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(serviceCheck.tags, origin, processID, serviceCheck.localData, serviceCheck.externalData, serviceCheck.cardinality, conf) enrichedServiceCheck := &servicecheck.ServiceCheck{ CheckName: serviceCheck.name, diff --git a/comp/dogstatsd/server/enrich_bench_test.go b/comp/dogstatsd/server/enrich_bench_test.go index 80d405280f539c..2d0c31fc16d010 100644 --- a/comp/dogstatsd/server/enrich_bench_test.go +++ b/comp/dogstatsd/server/enrich_bench_test.go @@ -35,7 +35,7 @@ func BenchmarkExtractTagsMetadata(b *testing.B) { sb.ResetTimer() for n := 0; n < sb.N; n++ { - tags, _, _, _ = extractTagsMetadata(baseTags, "", 0, origindetection.LocalData{}, origindetection.ExternalData{}, conf) + tags, _, _, _ = extractTagsMetadata(baseTags, "", 0, origindetection.LocalData{}, origindetection.ExternalData{}, "", conf) } }) } diff --git 
a/comp/dogstatsd/server/enrich_test.go b/comp/dogstatsd/server/enrich_test.go index 2e17893686a23d..c3e95b1d9eda4d 100644 --- a/comp/dogstatsd/server/enrich_test.go +++ b/comp/dogstatsd/server/enrich_test.go @@ -1115,6 +1115,7 @@ func TestEnrichTags(t *testing.T) { originFromMsg []byte localData origindetection.LocalData externalData origindetection.ExternalData + cardinality string conf enrichConfig } tests := []struct { @@ -1463,12 +1464,46 @@ func TestEnrichTags(t *testing.T) { }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, + { + name: "cardinality field as none", + args: args{ + cardinality: types.NoneCardinalityString, + }, + wantedTags: nil, + wantedOrigin: taggertypes.OriginInfo{ + Cardinality: "none", + }, + wantedMetricSource: metrics.MetricSourceDogstatsd, + }, + { + name: "cardinality field as high", + args: args{ + cardinality: types.HighCardinalityString, + }, + wantedTags: nil, + wantedOrigin: taggertypes.OriginInfo{ + Cardinality: "high", + }, + wantedMetricSource: metrics.MetricSourceDogstatsd, + }, + { + name: "cardinality field with dd.internal.card", + args: args{ + tags: []string{"env:prod", "dd.internal.card:high"}, + cardinality: types.NoneCardinalityString, + }, + wantedTags: []string{"env:prod", "dd.internal.card:high"}, + wantedOrigin: taggertypes.OriginInfo{ + Cardinality: "none", + }, + wantedMetricSource: metrics.MetricSourceDogstatsd, + }, } for _, tt := range tests { tt.wantedOrigin.ProductOrigin = origindetection.ProductOriginDogStatsD t.Run(tt.name, func(t *testing.T) { - tags, host, origin, metricSource := extractTagsMetadata(tt.args.tags, tt.args.originFromUDS, 0, tt.args.localData, tt.args.externalData, tt.args.conf) + tags, host, origin, metricSource := extractTagsMetadata(tt.args.tags, tt.args.originFromUDS, 0, tt.args.localData, tt.args.externalData, tt.args.cardinality, tt.args.conf) assert.Equal(t, tt.wantedTags, tags) assert.Equal(t, tt.wantedHost, host) assert.Equal(t, tt.wantedOrigin, origin) @@ -1516,7 +1551,7 
@@ func TestEnrichTagsWithJMXCheckName(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tags, _, _, metricSource := extractTagsMetadata(tt.tags, "", 0, origindetection.LocalData{}, origindetection.ExternalData{}, enrichConfig{}) + tags, _, _, metricSource := extractTagsMetadata(tt.tags, "", 0, origindetection.LocalData{}, origindetection.ExternalData{}, "", enrichConfig{}) assert.Equal(t, tt.wantedTags, tags) assert.Equal(t, tt.wantedMetricSource, metricSource) assert.NotContains(t, tags, tt.jmxCheckName) diff --git a/comp/dogstatsd/server/parse.go b/comp/dogstatsd/server/parse.go index 8b7ae778af0503..e7b6b3b5123cf0 100644 --- a/comp/dogstatsd/server/parse.go +++ b/comp/dogstatsd/server/parse.go @@ -48,6 +48,9 @@ var ( // externalDataPrefix is the prefix for a common field which contains the external data for Origin Detection. externalDataPrefix = []byte("e:") + + // cardinalityPrefix is the prefix for a common field which contains the cardinality for Origin Detection. 
+ cardinalityPrefix = []byte("card:") ) // parser parses dogstatsd messages @@ -174,6 +177,7 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error var tags []string var localData origindetection.LocalData var externalData origindetection.ExternalData + var cardinality string var optionalField []byte var timestamp time.Time for message != nil { @@ -207,6 +211,9 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error // external data case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, externalDataPrefix): externalData = p.parseExternalData(optionalField[len(externalDataPrefix):]) + // cardinality + case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, cardinalityPrefix): + cardinality = string(optionalField[len(cardinalityPrefix):]) } } @@ -220,6 +227,7 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error tags: tags, localData: localData, externalData: externalData, + cardinality: cardinality, ts: timestamp, }, nil } diff --git a/comp/dogstatsd/server/parse_events.go b/comp/dogstatsd/server/parse_events.go index b84a530d180990..65e5393e2dad73 100644 --- a/comp/dogstatsd/server/parse_events.go +++ b/comp/dogstatsd/server/parse_events.go @@ -43,6 +43,8 @@ type dogstatsdEvent struct { localData origindetection.LocalData // externalData is used for Origin Detection externalData origindetection.ExternalData + // cardinality is used for Origin Detection + cardinality string } type eventHeader struct { @@ -170,6 +172,8 @@ func (p *parser) applyEventOptionalField(event dogstatsdEvent, optionalField []b newEvent.localData = p.parseLocalData(optionalField[len(localDataPrefix):]) case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, externalDataPrefix): newEvent.externalData = p.parseExternalData(optionalField[len(externalDataPrefix):]) + case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, cardinalityPrefix): + newEvent.cardinality = 
string(optionalField[len(cardinalityPrefix):]) } if err != nil { return event, err diff --git a/comp/dogstatsd/server/parse_metrics.go b/comp/dogstatsd/server/parse_metrics.go index 0c6da45f9d1205..8edba062bf863b 100644 --- a/comp/dogstatsd/server/parse_metrics.go +++ b/comp/dogstatsd/server/parse_metrics.go @@ -51,6 +51,8 @@ type dogstatsdMetricSample struct { localData origindetection.LocalData // externalData is used for Origin Detection externalData origindetection.ExternalData + // cardinality is used for Origin Detection + cardinality string // timestamp read in the message if any ts time.Time } diff --git a/comp/dogstatsd/server/parse_service_checks.go b/comp/dogstatsd/server/parse_service_checks.go index 6f4cc2239f2fa6..1c230d088e3739 100644 --- a/comp/dogstatsd/server/parse_service_checks.go +++ b/comp/dogstatsd/server/parse_service_checks.go @@ -34,6 +34,8 @@ type dogstatsdServiceCheck struct { localData origindetection.LocalData // externalData is used for Origin Detection externalData origindetection.ExternalData + // cardinality is used for Origin Detection + cardinality string } var ( @@ -104,6 +106,8 @@ func (p *parser) applyServiceCheckOptionalField(serviceCheck dogstatsdServiceChe newServiceCheck.localData = p.parseLocalData(optionalField[len(localDataPrefix):]) case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, externalDataPrefix): newServiceCheck.externalData = p.parseExternalData(optionalField[len(externalDataPrefix):]) + case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, cardinalityPrefix): + newServiceCheck.cardinality = string(optionalField[len(cardinalityPrefix):]) } if err != nil { return serviceCheck, err diff --git a/releasenotes/notes/dogstatsd_cardinality_field-564b3d846700335c.yaml b/releasenotes/notes/dogstatsd_cardinality_field-564b3d846700335c.yaml new file mode 100644 index 00000000000000..a5979aba81a64c --- /dev/null +++ b/releasenotes/notes/dogstatsd_cardinality_field-564b3d846700335c.yaml @@ -0,0 +1,12 @@ +# Each 
section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Add new `card:` common field to DogStatsD Datagram specification to allow + customer to specify the cardinality of the metric. This field is optional. From 70b588e17dc3ae2dbbf5d982910fb3d32e9082ea Mon Sep 17 00:00:00 2001 From: Alexandre Yang Date: Thu, 30 Jan 2025 11:48:48 +0100 Subject: [PATCH 68/97] [FA][HA Agent][NDMII-3267] Add config_id metadata (#33215) --- comp/metadata/inventoryagent/README.md | 4 +++- .../inventoryagent/inventoryagentimpl/inventoryagent.go | 6 ++++++ .../inventoryagentimpl/inventoryagent_test.go | 8 ++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/comp/metadata/inventoryagent/README.md b/comp/metadata/inventoryagent/README.md index 413e08865fb976..bf38c7e0041987 100644 --- a/comp/metadata/inventoryagent/README.md +++ b/comp/metadata/inventoryagent/README.md @@ -124,6 +124,7 @@ The payload is a JSON dict with the following fields - `ecs_fargate_task_arn` - **string**: if the Agent runs in ECS Fargate, contains the Agent's Task ARN. Else, is empty. - `ecs_fargate_cluster_name` - **string**: if the Agent runs in ECS Fargate, contains the Agent's cluster name. Else, is empty. - `fleet_policies_applied` -- **array of string**: The Fleet Policies that have been applied to the agent, if any. Is empty if no policy is applied. + - `config_id` -- **string**: the Fleet Config ID, the configuration value `config_id`. 
("scrubbed" indicates that secrets are removed from the field value just as they are in logs) @@ -167,7 +168,8 @@ Here an example of an inventory payload: "environment_variable_configuration": "api_key: \"***************************aaaaa\"", "remote_configuration": "log_level: \"debug\"", "cli_configuration": "log_level: \"warn\"", - "source_local_configuration": "" + "source_local_configuration": "", + "config_id": "my-config" } "hostname": "my-host", "timestamp": 1631281754507358895 diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go index 44310c9ba98d36..01023bc2f0c464 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go @@ -357,6 +357,10 @@ func (ia *inventoryagent) fetchECSFargateAgentMetadata() { ia.data["ecs_fargate_cluster_name"] = taskMeta.ClusterName } +func (ia *inventoryagent) fetchFleetMetadata() { + ia.data["config_id"] = ia.conf.GetString("config_id") +} + func (ia *inventoryagent) refreshMetadata() { // Core Agent / agent ia.fetchCoreAgentMetadata() @@ -368,6 +372,8 @@ func (ia *inventoryagent) refreshMetadata() { ia.fetchTraceAgentMetadata() // system-probe ecosystem ia.fetchSystemProbeMetadata() + // Fleet + ia.fetchFleetMetadata() } func (ia *inventoryagent) writePayloadAsJSON(w http.ResponseWriter, _ *http.Request) { diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go index f8466bef53cb4b..c71d7105fae304 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go @@ -664,6 +664,14 @@ dynamic_instrumentation: assert.True(t, ia.data["feature_dynamic_instrumentation_enabled"].(bool)) } +func TestFetchFleet(t *testing.T) { + ia := getTestInventoryPayload(t, 
map[string]any{ + "config_id": "my-config", + }, nil) + ia.fetchFleetMetadata() + assert.Equal(t, "my-config", ia.data["config_id"].(string)) +} + func TestGetProvidedConfigurationDisable(t *testing.T) { ia := getTestInventoryPayload(t, map[string]any{ "inventories_configuration_enabled": false, From 7ead26c51c8f625854070a94dc471d954d410b2a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:58:16 +0100 Subject: [PATCH 69/97] Bump the aws-sdk-go-v2 group across 1 directory with 7 updates (#33431) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Marethyu <45374460+Pythyu@users.noreply.github.com> --- test/new-e2e/go.mod | 44 +++++++++++------------ test/new-e2e/go.sum | 88 ++++++++++++++++++++++----------------------- 2 files changed, 66 insertions(+), 66 deletions(-) diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 3ec32b911c0488..d1ec7304d0928b 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -61,11 +61,11 @@ require ( // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB github.com/DataDog/test-infra-definitions v0.0.0-20250127165314-f92dca10d03c - github.com/aws/aws-sdk-go-v2 v1.33.0 - github.com/aws/aws-sdk-go-v2/config v1.29.1 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 - github.com/aws/aws-sdk-go-v2/service/eks v1.51.0 - github.com/aws/aws-sdk-go-v2/service/ssm v1.55.2 + github.com/aws/aws-sdk-go-v2 v1.34.0 + github.com/aws/aws-sdk-go-v2/config v1.29.2 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.201.1 + github.com/aws/aws-sdk-go-v2/service/eks v1.57.0 + github.com/aws/aws-sdk-go-v2/service/ssm v1.56.8 github.com/cenkalti/backoff v2.2.1+incompatible github.com/docker/cli v27.5.0+incompatible github.com/docker/docker v27.5.1+incompatible @@ -110,23 
+110,23 @@ require ( github.com/alessio/shellescape v1.4.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.54 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.55 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.38.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.53.2 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 // indirect - github.com/aws/smithy-go v1.22.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.53.9 + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 // indirect + github.com/aws/smithy-go v1.22.2 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 @@ -283,7 +283,7 @@ require ( github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.6.0 github.com/aws/aws-sdk-go v1.55.6 - github.com/aws/aws-sdk-go-v2/service/s3 v1.72.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1 github.com/aws/session-manager-plugin v0.0.0-20241119210807-82dc72922492 github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c github.com/hairyhenderson/go-codeowners v0.7.0 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index ff8cd7edab5a74..c37844f41f2150 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -46,54 +46,54 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= -github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= 
-github.com/aws/aws-sdk-go-v2/config v1.29.1 h1:JZhGawAyZ/EuJeBtbQYnaoftczcb2drR2Iq36Wgz4sQ= -github.com/aws/aws-sdk-go-v2/config v1.29.1/go.mod h1:7bR2YD5euaxBhzt2y/oDkt3uNRb6tjFp98GlTFueRwk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.54 h1:4UmqeOqJPvdvASZWrKlhzpRahAulBfyTJQUaYy4+hEI= -github.com/aws/aws-sdk-go-v2/credentials v1.17.54/go.mod h1:RTdfo0P0hbbTxIhmQrOsC/PquBZGabEPnCaxxKRPSnI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 h1:5grmdTdMsovn9kPZPI23Hhvp0ZyNm5cRO+IZFIYiAfw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24/go.mod h1:zqi7TVKTswH3Ozq28PkmBmgzG1tona7mo9G2IJg4Cis= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 h1:igORFSiH3bfq4lxKFkTSYDhJEUCYo6C8VKiWJjYwQuQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28/go.mod h1:3So8EA/aAYm36L7XIvCVwLa0s5N0P7o2b1oqnx/2R4g= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 h1:1mOW9zAUMhTSrMDssEHS/ajx8JcAj/IcftzcmNlmVLI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28/go.mod h1:kGlXVIWDfvt2Ox5zEaNglmq0hXPHgQFNMix33Tw22jA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26 h1:GeNJsIFHB+WW5ap2Tec4K6dzcVTsRbsT1Lra46Hv9ME= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26/go.mod h1:zfgMpwHDXX2WGoG84xG2H+ZlPTkJUU4YUvx2svLQYWo= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 h1:3hH6o7Z2WeE1twvz44Aitn6Qz8DZN3Dh5IB4Eh2xq7s= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0/go.mod h1:I76S7jN0nfsYTBtuTgTsJtK2Q8yJVDgrLr5eLN64wMA= +github.com/aws/aws-sdk-go-v2 v1.34.0 h1:9iyL+cjifckRGEVpRKZP3eIxVlL06Qk1Tk13vreaVQU= +github.com/aws/aws-sdk-go-v2 v1.34.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= 
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= +github.com/aws/aws-sdk-go-v2/config v1.29.2 h1:JuIxOEPcSKpMB0J+khMjznG9LIhIBdmqNiEcPclnwqc= +github.com/aws/aws-sdk-go-v2/config v1.29.2/go.mod h1:HktTHregOZwNSM/e7WTfVSu9RCX+3eOv+6ij27PtaYs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.55 h1:CDhKnDEaGkLA5ZszV/qw5uwN5M8rbv9Cl0JRN+PRsaM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.55/go.mod h1:kPD/vj+RB5MREDUky376+zdnjZpR+WgdBBvwrmnlmKE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 h1:kU7tmXNaJ07LsyN3BUgGqAmVmQtq0w6duVIHAKfp0/w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25/go.mod h1:OiC8+OiqrURb1wrwmr/UbOVLFSWEGxjinj5C299VQdo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 h1:Ej0Rf3GMv50Qh4G4852j2djtoDb7AzQ7MuQeFHa3D70= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29/go.mod h1:oeNTC7PwJNoM5AznVr23wxhLnuJv0ZDe5v7w0wqIs9M= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 h1:6e8a71X+9GfghragVevC5bZqvATtc3mAMgxpSNbgzF0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29/go.mod h1:c4jkZiQ+BWpNqq7VtrxjwISrLrt/VvPq3XiopkUIolI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 h1:g9OUETuxA8i/Www5Cby0R3WSTe7ppFTZXHVLNskNS4w= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29/go.mod h1:CQk+koLR1QeY1+vm7lqNfFii07DEderKq6T3F1L2pyc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.201.1 h1:HJUHMHbBg3stGO7ZZfpwbeK9xVhGS7GK8NScady6Moc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.201.1/go.mod h1:cRD0Fhzj0YD+uAh16NChQAv9/BB0S9x3YK9hLx1jb/k= github.com/aws/aws-sdk-go-v2/service/ecr v1.38.1 h1:pCI3RIJnZEUs0evNm+pdDzvAp+YwpabUyQTPPvxO8oY= github.com/aws/aws-sdk-go-v2/service/ecr v1.38.1/go.mod h1:NqKnlZvLl4Tp2UH/GEc/nhbjmPQhwOXmLp2eldiszLM= 
-github.com/aws/aws-sdk-go-v2/service/ecs v1.53.2 h1:o/FdG76sTAoC8h20j6bSBE6MPJYOZhNIh0nJ8Q8druY= -github.com/aws/aws-sdk-go-v2/service/ecs v1.53.2/go.mod h1:YpTRClSDOPvN2e3kiIrYOx1sI+YKTZVmlMiNO2AwYhE= -github.com/aws/aws-sdk-go-v2/service/eks v1.51.0 h1:BYyB+byjQ7oyupe3v+YjTp1yfmfNEwChYA2naCc85xI= -github.com/aws/aws-sdk-go-v2/service/eks v1.51.0/go.mod h1:oaPCqTzAe8C5RQZJGRD4RENcV7A4n99uGxbD4rULbNg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 h1:tB4tNw83KcajNAzaIMhkhVI2Nt8fAZd5A5ro113FEMY= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7/go.mod h1:lvpyBGkZ3tZ9iSsUIcC2EWp+0ywa7aK3BLT+FwZi+mQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETFGiXVhZfQ/I0cCFziqqX58pi4tKJGYGFSz0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 h1:Hi0KGbrnr57bEHWM0bJ1QcBzxLrL/k2DHvGYhb8+W1w= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7/go.mod h1:wKNgWgExdjjrm4qvfbTorkvocEstaoDl4WCvGfeCy9c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.72.0 h1:SAfh4pNx5LuTafKKWR02Y+hL3A+3TX8cTKG1OIAJaBk= -github.com/aws/aws-sdk-go-v2/service/s3 v1.72.0/go.mod h1:r+xl5yzMk9083rMR+sJ5TYj9Tihvf/l1oxzZXDgGj2Q= -github.com/aws/aws-sdk-go-v2/service/ssm v1.55.2 h1:z6Pq4+jtKlhK4wWJGHRGwMLGjC1HZwAO3KJr/Na0tSU= -github.com/aws/aws-sdk-go-v2/service/ssm v1.55.2/go.mod h1:DSmu/VZzpQlAubWBbAvNpt+S4k/XweglJi4XaDGyvQk= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 h1:kuIyu4fTT38Kj7YCC7ouNbVZSSpqkZ+LzIfhCr6Dg+I= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.11/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY= 
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 h1:l+dgv/64iVlQ3WsBbnn+JSbkj01jIi+SM0wYsj3y/hY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10/go.mod h1:Fzsj6lZEb8AkTE5S68OhcbBqeWPsR8RnGuKPr8Todl8= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 h1:BRVDbewN6VZcwr+FBOszDKvYeXY1kJ+GGMCcpghlw0U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.9/go.mod h1:f6vjfZER1M17Fokn0IzssOTMT2N8ZSq+7jnNF0tArvw= +github.com/aws/aws-sdk-go-v2/service/ecs v1.53.9 h1:zP4i8gzYXFt20kS6YHdm3UWqKFj1I1qQT3fqu8cK8OQ= +github.com/aws/aws-sdk-go-v2/service/ecs v1.53.9/go.mod h1:XGmGx8WmR+Kz6c5Nm6WaRZMGwR6ERnoCNGXDPfT8XSA= +github.com/aws/aws-sdk-go-v2/service/eks v1.57.0 h1:+g6K3PF6xeCqGr2MJT8CnwrluWQv0BlHO9RrwivHwWk= +github.com/aws/aws-sdk-go-v2/service/eks v1.57.0/go.mod h1:XXCcNup2LhXfIllxo6fCyHY31J8RLU3d3sM/lGGnO/s= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 h1:EP1ITDgYVPM2dL1bBBntJ7AW5yTjuWGz9XO+CZwpALU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3/go.mod h1:5lWNWeAgWenJ/BZ/CP9k9DjLbC0pjnM045WjXRPPi14= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 h1:hN4yJBGswmFTOVYqmbz1GBs9ZMtQe8SrYxPwrkrlRv8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10/go.mod h1:TsxON4fEZXyrKY+D+3d2gSTyJkGORexIYab9PTf56DA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 h1:fXoWC2gi7tdJYNTPnnlSGzEVwewUchOi8xVq/dkg8Qs= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10/go.mod h1:cvzBApD5dVazHU8C2rbBQzzzsKc8m5+wNJ9mCRZLKPc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1 h1:9LawY3cDJ3HE+v2GMd5SOkNLDwgN4K7TsCjyVBYu/L4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1/go.mod h1:hHnELVnIHltd8EOF3YzahVX6F6y2C6dNqpRj1IMkS5I= 
+github.com/aws/aws-sdk-go-v2/service/ssm v1.56.8 h1:MBdLPDbhwvgIpjIVAo2K49b+mJgthRfq3pJ57OMF7Ro= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.8/go.mod h1:9XDwaJPbim0IsiHqC/jWwXviigOiQJC+drPPy6ZfIlE= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 h1:kznaW4f81mNMlREkU9w3jUuJvU5g/KsqDV43ab7Rp6s= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.12/go.mod h1:bZy9r8e0/s0P7BSDHgMLXK2KvdyRRBIQ2blKlvLt0IU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 h1:mUwIpAvILeKFnRx4h1dEgGEFGuV8KJ3pEScZWVFYuZA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11/go.mod h1:JDJtD+b8HNVv71axz8+S5492KM8wTzHRFpMKQbPlYxw= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 h1:g9d+TOsu3ac7SgmY2dUf1qMgu/uJVTlQ4VCbH6hRxSw= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.10/go.mod h1:WZfNmntu92HO44MVZAubQaz3qCuIdeOdog2sADfU6hU= github.com/aws/session-manager-plugin v0.0.0-20241119210807-82dc72922492 h1:Ihams/fjKo4iWwM313ng2gCJWoetsL7ZQkXhOTmVUq4= github.com/aws/session-manager-plugin v0.0.0-20241119210807-82dc72922492/go.mod h1:7n17tunRPUsniNBu5Ja9C7WwJWTdOzaLqr/H0Ns3uuI= -github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= -github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= From 6115a6c613ef1f78f6812b4bc77c599022d4f71c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Thu, 30 Jan 2025 14:17:58 +0100 Subject: [PATCH 70/97] [EBPF] gpu: use flaky.MarkOnLog on e2e test (#33524) --- test/new-e2e/tests/gpu/gpu_test.go | 5 +++-- 
test/new-e2e/tests/gpu/provisioner.go | 14 -------------- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/test/new-e2e/tests/gpu/gpu_test.go b/test/new-e2e/tests/gpu/gpu_test.go index 1983a309541cbb..4b268eeb56f9b0 100644 --- a/test/new-e2e/tests/gpu/gpu_test.go +++ b/test/new-e2e/tests/gpu/gpu_test.go @@ -53,8 +53,9 @@ func mandatoryMetricTagRegexes() []*regexp.Regexp { // TestGPUSuite runs tests for the VM interface to ensure its implementation is correct. // Not to be run in parallel, as some tests wait until the checks are available. func TestGPUSuite(t *testing.T) { - // incident-33572 - flake.Mark(t) + // incident-33572. Pulumi seems to sometimes fail to create the stack with an error + // we are not able to debug from the logs. We mark the test as flaky in that case only. + flake.MarkOnLog(t, "error: an unhandled error occurred: waiting for RPCs:") provParams := getDefaultProvisionerParams() // Append our vectorAdd image for testing diff --git a/test/new-e2e/tests/gpu/provisioner.go b/test/new-e2e/tests/gpu/provisioner.go index ea2e571de239ad..b36bfd9bd79877 100644 --- a/test/new-e2e/tests/gpu/provisioner.go +++ b/test/new-e2e/tests/gpu/provisioner.go @@ -142,20 +142,6 @@ func gpuInstanceProvisioner(params *provisionerParams) provisioners.Provisioner if err != nil { return fmt.Errorf("validateDockerCuda failed: %w", err) } - // incident-33572: log the output of the CUDA validation command - pulumi.All(dockerCudaValidateCmd.StdoutOutput(), dockerCudaValidateCmd.StderrOutput()).ApplyT(func(args []interface{}) error { - stdout := args[0].(string) - stderr := args[1].(string) - err := ctx.Log.Info(fmt.Sprintf("Docker CUDA validation stdout: %s", stdout), nil) - if err != nil { - return err - } - err = ctx.Log.Info(fmt.Sprintf("Docker CUDA validation stderr: %s", stderr), nil) - if err != nil { - return err - } - return nil - }) // Combine agent options from the parameters with the fakeintake and docker dependencies params.agentOptions = 
append(params.agentOptions, From 0d8791e2fbe8db0d9441231be6778effd6eb210a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Thu, 30 Jan 2025 14:21:58 +0100 Subject: [PATCH 71/97] [EBPF] ebpfcheck: use XDP as the program type for the entry count helper (#33536) --- .../corechecks/ebpf/probe/ebpfcheck/map_prog_helper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/map_prog_helper.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/map_prog_helper.go index 4435bf5163a34d..89f1659a7a9bd7 100644 --- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/map_prog_helper.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/map_prog_helper.go @@ -120,7 +120,7 @@ func (c *mapProgHelperCache) newHelperProgramForFd(fd int) (helperProgData, erro */ spec := &ebpf.ProgramSpec{ - Type: ebpf.SocketFilter, + Type: ebpf.XDP, // Use XDP to ensure maximum compatibility (e.g., socket filter programs cannot deal with maps that contain spin locks) Instructions: asm.Instructions{ // entry btf.WithFuncMetadata( From 7f9b35c1740c4fc762223db1505bda8a2f39f6b0 Mon Sep 17 00:00:00 2001 From: Guillaume Pagnoux Date: Thu, 30 Jan 2025 14:35:37 +0100 Subject: [PATCH 72/97] discovery: module: fix concurrent map accesses + simplify lock handling (#33573) --- .../servicediscovery/module/impl_linux.go | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index ccb3dd0b58e302..9cd69a221a38f9 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -215,6 +215,9 @@ func (s *discovery) handleStatusEndpoint(w http.ResponseWriter, _ *http.Request) } func (s *discovery) handleDebugEndpoint(w http.ResponseWriter, _ *http.Request) { + s.mux.Lock() + defer s.mux.Unlock() + services 
:= make([]model.Service, 0) procRoot := kernel.ProcFSRoot() @@ -461,17 +464,11 @@ type parsingContext struct { // addIgnoredPid store excluded pid. func (s *discovery) addIgnoredPid(pid int32) { - s.mux.Lock() - defer s.mux.Unlock() - s.ignorePids[pid] = struct{}{} } // shouldIgnorePid returns true if process should be excluded from handling. func (s *discovery) shouldIgnorePid(pid int32) bool { - s.mux.Lock() - defer s.mux.Unlock() - _, found := s.ignorePids[pid] return found } @@ -622,9 +619,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service } var info *serviceInfo - s.mux.RLock() cached, ok := s.cache[pid] - s.mux.RUnlock() if ok { info = cached } else { @@ -633,9 +628,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service return nil } - s.mux.Lock() s.cache[pid] = info - s.mux.Unlock() } if s.shouldIgnoreService(info.name) { @@ -787,7 +780,6 @@ func (s *discovery) enrichContainerData(service *model.Service, containers map[s service.ContainerServiceNameSource = tagName service.CheckedContainerData = true - s.mux.Lock() serviceInfo, ok := s.cache[int32(service.PID)] if ok { serviceInfo.containerServiceName = serviceName @@ -795,7 +787,6 @@ func (s *discovery) enrichContainerData(service *model.Service, containers map[s serviceInfo.checkedContainerData = true serviceInfo.containerID = id } - s.mux.Unlock() } func (s *discovery) updateCacheInfo(response *model.ServicesResponse, now time.Time) { @@ -848,6 +839,9 @@ func (s *discovery) handleStoppedServices(response *model.ServicesResponse, aliv // getStatus returns the list of currently running services. 
func (s *discovery) getServices() (*model.ServicesResponse, error) { + s.mux.Lock() + defer s.mux.Unlock() + procRoot := kernel.ProcFSRoot() pids, err := process.Pids() if err != nil { @@ -913,9 +907,6 @@ func (s *discovery) getServices() (*model.ServicesResponse, error) { log.Debugf("[pid: %d] adding process to potential: %s", pid, service.Name) } - s.mux.Lock() - defer s.mux.Unlock() - s.updateCacheInfo(response, now) s.handleStoppedServices(response, alivePids) From 734013ee47af02b9e71bdf20a38931fe76d280dd Mon Sep 17 00:00:00 2001 From: Guillaume Pagnoux Date: Thu, 30 Jan 2025 14:37:36 +0100 Subject: [PATCH 73/97] Updated CODEOWNERS with new agent-discovery team (#33547) --- .ddqa/config.toml | 7 +++++++ .github/CODEOWNERS | 12 ++++++------ tasks/libs/issue/model/constants.py | 1 + tasks/libs/pipeline/github_jira_map.yaml | 1 + tasks/libs/pipeline/github_slack_map.yaml | 1 + tasks/libs/pipeline/github_slack_review_map.yaml | 1 + 6 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.ddqa/config.toml b/.ddqa/config.toml index 17e3811c30163b..b19ea5b7a576a1 100644 --- a/.ddqa/config.toml +++ b/.ddqa/config.toml @@ -66,6 +66,13 @@ github_team = "agent-delivery" github_labels = ["team/agent-delivery"] exclude_members = ["KSerrania"] +[teams."Agent Discovery"] +jira_project = "DSCVR" +jira_issue_type = "Task" +jira_statuses = ["To Do", "In Progress", "Done"] +github_team = "agent-discovery" +github_labels = ["team/agent-discovery"] + [teams."Universal Service Monitoring"] jira_project = "USMON" jira_issue_type = "Task" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 71dffe21a3b55e..b6516eeb7033bd 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -232,7 +232,7 @@ /cmd/system-probe/modules/tcp_queue_tracer* @DataDog/container-integrations /cmd/system-probe/modules/traceroute* @DataDog/network-device-monitoring @Datadog/Networks /cmd/system-probe/modules/ping* @DataDog/ndm-core -/cmd/system-probe/modules/language_detection* 
@DataDog/container-intake @DataDog/universal-service-monitoring +/cmd/system-probe/modules/language_detection* @DataDog/container-intake @DataDog/universal-service-monitoring @DataDog/agent-discovery /cmd/system-probe/modules/dynamic_instrumentation* @DataDog/debugger /cmd/system-probe/windows_resources/ @DataDog/windows-kernel-integrations /cmd/system-probe/main_windows*.go @DataDog/windows-kernel-integrations @@ -345,7 +345,7 @@ /pkg/commonchecks/ @DataDog/agent-metrics /pkg/cli/ @DataDog/agent-shared-components /pkg/cli/subcommands/clusterchecks @DataDog/container-platform -/pkg/discovery/ @DataDog/universal-service-monitoring +/pkg/discovery/ @DataDog/agent-discovery /pkg/errors/ @DataDog/agent-shared-components /pkg/fips @DataDog/agent-shared-components /pkg/gohai @DataDog/agent-shared-components @@ -404,7 +404,7 @@ /pkg/collector/corechecks/net/ @DataDog/agent-shared-components /pkg/collector/corechecks/oracle @DataDog/database-monitoring /pkg/collector/corechecks/sbom/ @DataDog/container-integrations -/pkg/collector/corechecks/servicediscovery/ @DataDog/universal-service-monitoring +/pkg/collector/corechecks/servicediscovery/ @DataDog/agent-discovery /pkg/collector/corechecks/snmp/ @DataDog/ndm-core /pkg/collector/corechecks/system/ @DataDog/agent-shared-components /pkg/collector/corechecks/system/**/*_windows*.go @DataDog/agent-shared-components @DataDog/windows-agent @@ -485,7 +485,7 @@ /pkg/util/testutil/patternscanner.go @DataDog/universal-service-monitoring @DataDog/ebpf-platform /pkg/util/testutil/docker @DataDog/universal-service-monitoring @DataDog/ebpf-platform /pkg/util/trie @DataDog/container-integrations -/pkg/languagedetection @DataDog/container-intake @DataDog/universal-service-monitoring +/pkg/languagedetection @DataDog/container-intake @DataDog/universal-service-monitoring @DataDog/agent-discovery /pkg/linters/ @DataDog/agent-devx-loops /pkg/linters/components/ @DataDog/agent-shared-components /pkg/logs/ @DataDog/agent-logs @@ -607,7 +607,7 
@@ /test/fakeintake/ @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /test/fakeintake/aggregator/ndmflowAggregator.go @DataDog/ndm-integrations /test/fakeintake/aggregator/ndmflowAggregator_test.go @DataDog/ndm-integrations -/test/fakeintake/aggregator/servicediscovery* @DataDog/universal-service-monitoring +/test/fakeintake/aggregator/servicediscovery* @DataDog/agent-discovery /test/new-e2e/ @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /test/new-e2e/pkg/components/datadog-installer @DataDog/windows-agent /test/new-e2e/test-infra-definition @DataDog/agent-devx-loops @@ -617,7 +617,7 @@ /test/new-e2e/tests/agent-shared-components @DataDog/agent-shared-components /test/new-e2e/tests/agent-subcommands @DataDog/agent-shared-components /test/new-e2e/tests/containers @DataDog/container-integrations @DataDog/container-platform -/test/new-e2e/tests/discovery @DataDog/universal-service-monitoring +/test/new-e2e/tests/discovery @DataDog/agent-discovery /test/new-e2e/tests/fips-compliance @DataDog/agent-shared-components /test/new-e2e/tests/ha-agent @DataDog/ndm-core /test/new-e2e/tests/language-detection @DataDog/container-intake diff --git a/tasks/libs/issue/model/constants.py b/tasks/libs/issue/model/constants.py index b7335aa901d293..de9592a6e02dde 100644 --- a/tasks/libs/issue/model/constants.py +++ b/tasks/libs/issue/model/constants.py @@ -41,4 +41,5 @@ 'apm-onboarding', 'fleet', 'agent-processing-and-routing', + 'agent-discovery', ) diff --git a/tasks/libs/pipeline/github_jira_map.yaml b/tasks/libs/pipeline/github_jira_map.yaml index ba89ba7de8dbd7..58ec5fa29c9959 100644 --- a/tasks/libs/pipeline/github_jira_map.yaml +++ b/tasks/libs/pipeline/github_jira_map.yaml @@ -44,3 +44,4 @@ '@datadog/injection-platform': INPLAT '@datadog/agent-processing-and-routing': APR '@DataDog/container-ecosystems': CECO +'@datadog/agent-discovery': DSCVR diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index 
73e7fe4df4d86b..75dbeeab794e15 100644 --- a/tasks/libs/pipeline/github_slack_map.yaml +++ b/tasks/libs/pipeline/github_slack_map.yaml @@ -48,3 +48,4 @@ '@datadog/apm-ecosystems-performance': '#apm-benchmarking-platform' '@DataDog/container-ecosystems': '#container-ecosystems-ops' '@datadog/injection-platform': '#injection-platform' +'@datadog/agent-discovery': '#agent-discovery' diff --git a/tasks/libs/pipeline/github_slack_review_map.yaml b/tasks/libs/pipeline/github_slack_review_map.yaml index 59ddec09a8a4e0..58a93bd2347a42 100644 --- a/tasks/libs/pipeline/github_slack_review_map.yaml +++ b/tasks/libs/pipeline/github_slack_review_map.yaml @@ -48,3 +48,4 @@ '@datadog/apm-onboarding': '#apm-onboarding' '@datadog/apm-ecosystems-performance': '#apm-benchmarking-platform' '@datadog/injection-platform': '#injection-platform' +'@datadog/agent-discovery': '#agent-discovery' From e445fe6ea1f5b8dda0fa2acb39310e0be500a2b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Thu, 30 Jan 2025 09:35:24 -0500 Subject: [PATCH 74/97] [ACIX-546] Fix tag-devel for releasing (#33579) --- tasks/release.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tasks/release.py b/tasks/release.py index 9de2f6f3040ebf..10e148f6c3ab8c 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -269,8 +269,9 @@ def tag_version( @task def tag_devel(ctx, release_branch, commit="HEAD", push=True, force=False): - tag_version(ctx, release_branch, commit, push, force, devel=True) - tag_modules(ctx, release_branch, commit, push, force, devel=True, trust=True) + with agent_context(ctx, get_default_branch(major=get_version_major(release_branch))): + tag_version(ctx, release_branch, commit, push, force, devel=True) + tag_modules(ctx, release_branch, commit, push, force, devel=True, trust=True) @task From 360b074228c090b644a6889355f7a2412b9801ce Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" 
<153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 14:42:34 +0000 Subject: [PATCH 75/97] [omnibus][automated] Bump OMNIBUS_SOFTWARE_VERSION (#33570) Co-authored-by: chouquette --- release.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release.json b/release.json index b6cd397296bf6e..fbf16c98808bf8 100644 --- a/release.json +++ b/release.json @@ -6,7 +6,7 @@ "7": "7.61.0" }, "nightly": { - "OMNIBUS_SOFTWARE_VERSION": "dafdaa1231032f6a2e30ca39573306b55db4b962", + "OMNIBUS_SOFTWARE_VERSION": "d4a12d8a009e1c497e5e740e1ea5c8d23d6864ca", "OMNIBUS_RUBY_VERSION": "49ba11883cdf5692a39095d1a036a1ef59a25210", "JMXFETCH_VERSION": "0.49.6", "JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9", @@ -25,7 +25,7 @@ "INTEGRATIONS_CORE_VERSION": "6782bb7cf5da2ce9e0ab77d8420ac849d31cb8a6" }, "nightly-a7": { - "OMNIBUS_SOFTWARE_VERSION": "dafdaa1231032f6a2e30ca39573306b55db4b962", + "OMNIBUS_SOFTWARE_VERSION": "d4a12d8a009e1c497e5e740e1ea5c8d23d6864ca", "OMNIBUS_RUBY_VERSION": "49ba11883cdf5692a39095d1a036a1ef59a25210", "JMXFETCH_VERSION": "0.49.6", "JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9", From 77e59acdd78d420679362528a43594226bc7504d Mon Sep 17 00:00:00 2001 From: Minyi Zhu Date: Thu, 30 Jan 2025 10:13:44 -0500 Subject: [PATCH 76/97] [CONTP-587]Workloadmeta: Add mig config to GPU collector (#33120) --- .../collectors/internal/nvml/nvml.go | 120 +++++++++++++++--- comp/core/workloadmeta/def/types.go | 31 +++++ pkg/gpu/testutil/mocks.go | 6 + 3 files changed, 136 insertions(+), 21 deletions(-) diff --git a/comp/core/workloadmeta/collectors/internal/nvml/nvml.go b/comp/core/workloadmeta/collectors/internal/nvml/nvml.go index 049a6389b71370..2e91919f241d24 100644 --- a/comp/core/workloadmeta/collectors/internal/nvml/nvml.go +++ b/comp/core/workloadmeta/collectors/internal/nvml/nvml.go @@ -34,6 +34,102 @@ type collector struct { nvmlLib nvml.Interface 
} +func (c *collector) getDeviceInfo(device nvml.Device) (string, string, error) { + uuid, ret := device.GetUUID() + if ret != nvml.SUCCESS { + return "", "", fmt.Errorf("failed to get device UUID: %v", nvml.ErrorString(ret)) + } + name, ret := device.GetName() + if ret != nvml.SUCCESS { + return "", "", fmt.Errorf("failed to get device name: %v", nvml.ErrorString(ret)) + } + return uuid, name, nil +} + +// getMigProfileName() returns the canonical name of the MIG device +func getMigProfileName(attr nvml.DeviceAttributes) (string, error) { + g := attr.GpuInstanceSliceCount + gb := ((attr.MemorySizeMB + 1024 - 1) / 1024) + r := fmt.Sprintf("%dg.%dgb", g, gb) + return r, nil +} + +func (c *collector) getDeviceInfoMig(migDevice nvml.Device) (*workloadmeta.MigDevice, error) { + uuid, name, err := c.getDeviceInfo(migDevice) + if err != nil { + return nil, err + } + gpuInstanceID, ret := c.nvmlLib.DeviceGetGpuInstanceId(migDevice) + if ret != nvml.SUCCESS { + return nil, fmt.Errorf("failed to get GPU instance ID: %v", nvml.ErrorString(ret)) + } + attr, ret := migDevice.GetAttributes() + if ret != nvml.SUCCESS { + return nil, fmt.Errorf("failed to get device attributes: %v", nvml.ErrorString(ret)) + } + canonoicalName, _ := getMigProfileName(attr) + return &workloadmeta.MigDevice{ + GPUInstanceID: gpuInstanceID, + UUID: uuid, + Name: name, + GPUInstanceSliceCount: attr.GpuInstanceSliceCount, + MemorySizeMB: attr.MemorySizeMB, + ResourceName: canonoicalName, + }, nil +} + +func (c *collector) getGPUdeviceInfo(device nvml.Device) (*workloadmeta.GPU, error) { + uuid, name, err := c.getDeviceInfo(device) + if err != nil { + return nil, err + } + gpuIndexID, ret := c.nvmlLib.DeviceGetIndex(device) + if ret != nvml.SUCCESS { + return nil, fmt.Errorf("failed to get GPU index ID: %v", nvml.ErrorString(ret)) + } + gpuDeviceInfo := workloadmeta.GPU{ + EntityID: workloadmeta.EntityID{ + Kind: workloadmeta.KindGPU, + ID: uuid, + }, + EntityMeta: workloadmeta.EntityMeta{ + Name: name, 
+ }, + Vendor: nvidiaVendor, + Device: name, + Index: gpuIndexID, + MigEnabled: false, + MigDevices: nil, + } + + migEnabled, _, ret := c.nvmlLib.DeviceGetMigMode(device) + if ret == nvml.SUCCESS && migEnabled == nvml.DEVICE_MIG_ENABLE { + // If any mid detection fails, we will return an mig disabled in config + migDeviceCount, ret := c.nvmlLib.DeviceGetMaxMigDeviceCount(device) + if ret != nvml.SUCCESS { + log.Warnf("failed to get MIG capable device count: %v", nvml.ErrorString(ret)) + return &gpuDeviceInfo, nil + } + migDevs := make([]*workloadmeta.MigDevice, 0, migDeviceCount) + for j := 0; j < migDeviceCount; j++ { + migDevice, ret := c.nvmlLib.DeviceGetMigDeviceHandleByIndex(device, j) + if ret != nvml.SUCCESS { + log.Warnf("failed to get handle for MIG device %d: %v", j, nvml.ErrorString(ret)) + return &gpuDeviceInfo, nil + } + migDeviceInfo, err := c.getDeviceInfoMig(migDevice) + if err != nil { + log.Warnf("failed to get device info for MIG device %d: %v", j, err) + return &gpuDeviceInfo, nil + } + migDevs = append(migDevs, migDeviceInfo) + } + gpuDeviceInfo.MigEnabled = true + gpuDeviceInfo.MigDevices = migDevs + } + return &gpuDeviceInfo, nil +} + // NewCollector returns a kubelet CollectorProvider that instantiates its collector func NewCollector() (workloadmeta.CollectorProvider, error) { return workloadmeta.CollectorProvider{ @@ -80,27 +176,9 @@ func (c *collector) Pull(_ context.Context) error { return fmt.Errorf("failed to get device handle for index %d: %v", i, nvml.ErrorString(ret)) } - uuid, ret := dev.GetUUID() - if ret != nvml.SUCCESS { - return fmt.Errorf("failed to get device UUID for index %d: %v", i, nvml.ErrorString(ret)) - } - - name, ret := dev.GetName() - if ret != nvml.SUCCESS { - return fmt.Errorf("failed to get device name for index %d: %v", i, nvml.ErrorString(ret)) - } - - gpu := &workloadmeta.GPU{ - EntityID: workloadmeta.EntityID{ - Kind: workloadmeta.KindGPU, - ID: uuid, - }, - EntityMeta: workloadmeta.EntityMeta{ - Name: name, - 
}, - Vendor: nvidiaVendor, - Device: name, - Index: i, + gpu, err := c.getGPUdeviceInfo(dev) + if err != nil { + return err } arch, ret := dev.GetArchitecture() diff --git a/comp/core/workloadmeta/def/types.go b/comp/core/workloadmeta/def/types.go index e02a834f9ab821..2207b3440a07ea 100644 --- a/comp/core/workloadmeta/def/types.go +++ b/comp/core/workloadmeta/def/types.go @@ -1380,6 +1380,30 @@ type GPU struct { // SMCount is the number of streaming multiprocessors in the GPU. Optional, can be empty. SMCount int + + // MigEnabled is true if the GPU supports MIG (Multi-Instance GPU) and it is enabled. + MigEnabled bool + // MigDevices is a list of MIG devices that are part of the GPU. + MigDevices []*MigDevice +} + +// MigDevice contains information about a MIG device, including the GPU instance ID, device info, attributes, and profile. Nvidia MIG allows a single physical GPU to be partitioned into multiple isolated GPU instances so that multiple workloads can run on the same GPU. +type MigDevice struct { + // GPUInstanceID is the ID of the GPU instance. This is a unique identifier inside the parent GPU device. + GPUInstanceID int + // UUID is the device id retrieved from nvml in the format "MIG-XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXX" + UUID string + Name string + // GPUInstanceSliceCount and MemorySizeInGb are retrieved from the profile + // mig 1g.10gb profile will have GPUInstanceSliceCount = 1 and MemorySizeMB = 10000 + GPUInstanceSliceCount uint32 + MemorySizeMB uint64 + // ResourceName is the resource of the profile used, e.g. "1g.10gb", "2g.20gb", etc. 
+ ResourceName string +} + +func (m *MigDevice) String() string { + return fmt.Sprintf("GPU Instance ID: %d, UUID: %s, Resource: %s", m.GPUInstanceID, m.UUID, m.ResourceName) } var _ Entity = &GPU{} @@ -1427,6 +1451,13 @@ func (g GPU) String(verbose bool) string { _, _ = fmt.Fprintln(&sb, "Architecture:", g.Architecture) _, _ = fmt.Fprintln(&sb, "Compute Capability:", g.ComputeCapability) _, _ = fmt.Fprintln(&sb, "Streaming Multiprocessor Count:", g.SMCount) + if g.MigEnabled { + _, _ = fmt.Fprintln(&sb, "----------- MIG Device -----------") + _, _ = fmt.Fprintln(&sb, "MIG Enabled: true") + for _, migDevice := range g.MigDevices { + _, _ = fmt.Fprintln(&sb, migDevice.String()) + } + } return sb.String() } diff --git a/pkg/gpu/testutil/mocks.go b/pkg/gpu/testutil/mocks.go index 3ac12184aac2c6..90a2896334e78b 100644 --- a/pkg/gpu/testutil/mocks.go +++ b/pkg/gpu/testutil/mocks.go @@ -106,6 +106,12 @@ func GetBasicNvmlMock() *nvmlmock.Interface { DeviceGetCudaComputeCapabilityFunc: func(nvml.Device) (int, int, nvml.Return) { return 7, 5, nvml.SUCCESS }, + DeviceGetIndexFunc: func(nvml.Device) (int, nvml.Return) { + return 0, nvml.SUCCESS + }, + DeviceGetMigModeFunc: func(nvml.Device) (int, int, nvml.Return) { + return nvml.DEVICE_MIG_DISABLE, 0, nvml.SUCCESS + }, } } From ebb2e6739347b79745d479d26245911afa6434e5 Mon Sep 17 00:00:00 2001 From: Gustavo Caso Date: Thu, 30 Jan 2025 17:52:51 +0100 Subject: [PATCH 77/97] [ASCII-2734] remove redundant build tags from process agent (#33528) --- tasks/build_tags.py | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/tasks/build_tags.py b/tasks/build_tags.py index a053fc575d20c4..86f6be662a8d61 100644 --- a/tasks/build_tags.py +++ b/tasks/build_tags.py @@ -126,25 +126,30 @@ INSTALLER_TAGS = {"docker", "ec2", "kubelet"} # PROCESS_AGENT_TAGS lists the tags necessary to build the process-agent -PROCESS_AGENT_TAGS = AGENT_TAGS.union({"fargateprocess"}).difference({"otlp", 
"python", "trivy"}) +PROCESS_AGENT_TAGS = { + "containerd", + "no_dynamic_plugins", + "cri", + "crio", + "datadog.no_waf", + "ec2", + "docker", + "fargateprocess", + "kubelet", + "netcgo", + "podman", + "zlib", + "zstd", +} # PROCESS_AGENT_HEROKU_TAGS lists the tags necessary to build the process-agent for Heroku -PROCESS_AGENT_HEROKU_TAGS = PROCESS_AGENT_TAGS.difference( - { - "containerd", - "no_dynamic_plugins", - "cri", - "crio", - "docker", - "ec2", - "jetson", - "kubeapiserver", - "kubelet", - "orchestrator", - "podman", - "systemd", - } -) +PROCESS_AGENT_HEROKU_TAGS = { + "datadog.no_waf", + "fargateprocess", + "netcgo", + "zlib", + "zstd", +} # SECURITY_AGENT_TAGS lists the tags necessary to build the security agent SECURITY_AGENT_TAGS = { From 5f4441f28b278a7430cfe4a506f005129cf1934c Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Thu, 30 Jan 2025 17:53:30 +0100 Subject: [PATCH 78/97] [CWS] do not generate getter for length functions (#33584) --- .../generators/accessors/accessors.go | 1 + .../secl/model/field_accessors_unix.go | 456 ------------------ .../secl/model/field_accessors_windows.go | 62 --- 3 files changed, 1 insertion(+), 518 deletions(-) diff --git a/pkg/security/generators/accessors/accessors.go b/pkg/security/generators/accessors/accessors.go index 3b9124959ef37e..7975e8351358f0 100644 --- a/pkg/security/generators/accessors/accessors.go +++ b/pkg/security/generators/accessors/accessors.go @@ -247,6 +247,7 @@ func handleNonEmbedded(module *common.Module, field seclField, prefixedFieldName func addLengthOpField(module *common.Module, alias string, field *common.StructField) *common.StructField { lengthField := *field + lengthField.GenGetters = false lengthField.IsLength = true lengthField.Name += ".length" lengthField.OrigType = "int" diff --git a/pkg/security/secl/model/field_accessors_unix.go b/pkg/security/secl/model/field_accessors_unix.go index a64213aa03f45a..babfff0508eaed 100644 --- 
a/pkg/security/secl/model/field_accessors_unix.go +++ b/pkg/security/secl/model/field_accessors_unix.go @@ -26,14 +26,6 @@ func (ev *Event) GetChdirFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File) } -// GetChdirFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFilePathLength() int { - if ev.GetEventType().String() != "chdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File)) -} - // GetChmodFilePath returns the value of the field, resolving if necessary func (ev *Event) GetChmodFilePath() string { if ev.GetEventType().String() != "chmod" { @@ -42,14 +34,6 @@ func (ev *Event) GetChmodFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File) } -// GetChmodFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFilePathLength() int { - if ev.GetEventType().String() != "chmod" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File)) -} - // GetChownFilePath returns the value of the field, resolving if necessary func (ev *Event) GetChownFilePath() string { if ev.GetEventType().String() != "chown" { @@ -58,14 +42,6 @@ func (ev *Event) GetChownFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File) } -// GetChownFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetChownFilePathLength() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File)) -} - // GetContainerCreatedAt returns the value of the field, resolving if necessary func (ev *Event) GetContainerCreatedAt() int { if ev.BaseEvent.ContainerContext == nil { @@ -145,17 +121,6 @@ func (ev *Event) GetExecFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent) } -// GetExecFilePathLength returns the value of the field, resolving if necessary -func (ev 
*Event) GetExecFilePathLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent)) -} - // GetExecForkTime returns the value of the field, resolving if necessary func (ev *Event) GetExecForkTime() time.Time { if ev.GetEventType().String() != "exec" { @@ -203,17 +168,6 @@ func (ev *Event) GetExecInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) } -// GetExecInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFilePathLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent)) -} - // GetExecPid returns the value of the field, resolving if necessary func (ev *Event) GetExecPid() uint32 { if ev.GetEventType().String() != "exec" { @@ -324,17 +278,6 @@ func (ev *Event) GetExitFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent) } -// GetExitFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitFilePathLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent)) -} - // GetExitForkTime returns the value of the field, resolving if necessary func (ev *Event) GetExitForkTime() time.Time { if ev.GetEventType().String() != "exit" { @@ -382,17 +325,6 @@ func (ev *Event) GetExitInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) } -// GetExitInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFilePathLength() int { - if 
ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent)) -} - // GetExitPid returns the value of the field, resolving if necessary func (ev *Event) GetExitPid() uint32 { if ev.GetEventType().String() != "exit" { @@ -445,14 +377,6 @@ func (ev *Event) GetLinkFileDestinationPath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target) } -// GetLinkFileDestinationPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationPathLength() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target)) -} - // GetLinkFilePath returns the value of the field, resolving if necessary func (ev *Event) GetLinkFilePath() string { if ev.GetEventType().String() != "link" { @@ -461,14 +385,6 @@ func (ev *Event) GetLinkFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source) } -// GetLinkFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFilePathLength() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source)) -} - // GetLoadModuleFilePath returns the value of the field, resolving if necessary func (ev *Event) GetLoadModuleFilePath() string { if ev.GetEventType().String() != "load_module" { @@ -477,14 +393,6 @@ func (ev *Event) GetLoadModuleFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File) } -// GetLoadModuleFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFilePathLength() int { - if ev.GetEventType().String() != "load_module" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File)) -} - // GetMkdirFilePath returns the value of the field, resolving if necessary func 
(ev *Event) GetMkdirFilePath() string { if ev.GetEventType().String() != "mkdir" { @@ -493,14 +401,6 @@ func (ev *Event) GetMkdirFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File) } -// GetMkdirFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFilePathLength() int { - if ev.GetEventType().String() != "mkdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File)) -} - // GetMmapFilePath returns the value of the field, resolving if necessary func (ev *Event) GetMmapFilePath() string { if ev.GetEventType().String() != "mmap" { @@ -509,14 +409,6 @@ func (ev *Event) GetMmapFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File) } -// GetMmapFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFilePathLength() int { - if ev.GetEventType().String() != "mmap" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File)) -} - // GetMountMountpointPath returns the value of the field, resolving if necessary func (ev *Event) GetMountMountpointPath() string { if ev.GetEventType().String() != "mount" { @@ -541,14 +433,6 @@ func (ev *Event) GetOpenFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File) } -// GetOpenFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFilePathLength() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File)) -} - // GetProcessAncestorsCmdargv returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsCmdargv() []string { if ev.BaseEvent.ProcessContext == nil { @@ -612,27 +496,6 @@ func (ev *Event) GetProcessAncestorsFilePath() []string { return values } -// GetProcessAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) 
GetProcessAncestorsFilePathLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetProcessAncestorsGid returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsGid() []uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -696,27 +559,6 @@ func (ev *Event) GetProcessAncestorsInterpreterFilePath() []string { return values } -// GetProcessAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFilePathLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetProcessAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsPid() []uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -844,14 +686,6 @@ func (ev *Event) GetProcessFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) } -// GetProcessFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFilePathLength() int { - if 
ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent)) -} - // GetProcessForkTime returns the value of the field, resolving if necessary func (ev *Event) GetProcessForkTime() time.Time { if ev.BaseEvent.ProcessContext == nil { @@ -887,14 +721,6 @@ func (ev *Event) GetProcessInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) } -// GetProcessInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent)) -} - // GetProcessParentCmdargv returns the value of the field, resolving if necessary func (ev *Event) GetProcessParentCmdargv() []string { if ev.BaseEvent.ProcessContext == nil { @@ -940,17 +766,6 @@ func (ev *Event) GetProcessParentFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) } -// GetProcessParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent)) -} - // GetProcessParentGid returns the value of the field, resolving if necessary func (ev *Event) GetProcessParentGid() uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -996,17 +811,6 @@ func (ev *Event) GetProcessParentInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) } -// GetProcessParentInterpreterFilePathLength returns the value of the field, resolving if 
necessary -func (ev *Event) GetProcessParentInterpreterFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent)) -} - // GetProcessParentPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessParentPid() uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -1167,30 +971,6 @@ func (ev *Event) GetPtraceTraceeAncestorsFilePath() []string { return values } -// GetPtraceTraceeAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFilePathLength() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetPtraceTraceeAncestorsGid returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeAncestorsGid() []uint32 { if ev.GetEventType().String() != "ptrace" { @@ -1263,30 +1043,6 @@ func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePath() []string { return values } -// GetPtraceTraceeAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePathLength() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetPtraceTraceeAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeAncestorsPid() []uint32 { if ev.GetEventType().String() != "ptrace" { @@ -1441,17 +1197,6 @@ func (ev *Event) GetPtraceTraceeFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent) } -// GetPtraceTraceeFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent)) -} - // GetPtraceTraceeForkTime returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeForkTime() time.Time { if ev.GetEventType().String() != "ptrace" { @@ -1499,17 +1244,6 @@ func (ev *Event) GetPtraceTraceeInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) } -// GetPtraceTraceeInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent)) -} - // GetPtraceTraceeParentCmdargv returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeParentCmdargv() []string { if ev.GetEventType().String() != "ptrace" { @@ -1564,20 +1298,6 @@ func (ev 
*Event) GetPtraceTraceeParentFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent) } -// GetPtraceTraceeParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent)) -} - // GetPtraceTraceeParentGid returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeParentGid() uint32 { if ev.GetEventType().String() != "ptrace" { @@ -1632,20 +1352,6 @@ func (ev *Event) GetPtraceTraceeParentInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) } -// GetPtraceTraceeParentInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent)) -} - // GetPtraceTraceeParentPid returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeParentPid() uint32 { if ev.GetEventType().String() != "ptrace" { @@ -1766,14 +1472,6 @@ func (ev *Event) GetRemovexattrFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File) } -// GetRemovexattrFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRemovexattrFilePathLength() int { - if ev.GetEventType().String() != "removexattr" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File)) -} - // GetRenameFileDestinationPath returns 
the value of the field, resolving if necessary func (ev *Event) GetRenameFileDestinationPath() string { if ev.GetEventType().String() != "rename" { @@ -1782,14 +1480,6 @@ func (ev *Event) GetRenameFileDestinationPath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New) } -// GetRenameFileDestinationPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New)) -} - // GetRenameFilePath returns the value of the field, resolving if necessary func (ev *Event) GetRenameFilePath() string { if ev.GetEventType().String() != "rename" { @@ -1798,14 +1488,6 @@ func (ev *Event) GetRenameFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old) } -// GetRenameFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old)) -} - // GetRmdirFilePath returns the value of the field, resolving if necessary func (ev *Event) GetRmdirFilePath() string { if ev.GetEventType().String() != "rmdir" { @@ -1814,14 +1496,6 @@ func (ev *Event) GetRmdirFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File) } -// GetRmdirFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFilePathLength() int { - if ev.GetEventType().String() != "rmdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File)) -} - // GetSetxattrFilePath returns the value of the field, resolving if necessary func (ev *Event) GetSetxattrFilePath() string { if ev.GetEventType().String() != "setxattr" { @@ -1830,14 +1504,6 @@ func (ev *Event) GetSetxattrFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, 
&ev.SetXAttr.File) } -// GetSetxattrFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFilePathLength() int { - if ev.GetEventType().String() != "setxattr" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File)) -} - // GetSignalTargetAncestorsCmdargv returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetAncestorsCmdargv() []string { if ev.GetEventType().String() != "signal" { @@ -1910,30 +1576,6 @@ func (ev *Event) GetSignalTargetAncestorsFilePath() []string { return values } -// GetSignalTargetAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFilePathLength() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetSignalTargetAncestorsGid returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetAncestorsGid() []uint32 { if ev.GetEventType().String() != "signal" { @@ -2006,30 +1648,6 @@ func (ev *Event) GetSignalTargetAncestorsInterpreterFilePath() []string { return values } -// GetSignalTargetAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFilePathLength() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetSignalTargetAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetAncestorsPid() []uint32 { if ev.GetEventType().String() != "signal" { @@ -2184,17 +1802,6 @@ func (ev *Event) GetSignalTargetFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent) } -// GetSignalTargetFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent)) -} - // GetSignalTargetForkTime returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetForkTime() time.Time { if ev.GetEventType().String() != "signal" { @@ -2242,17 +1849,6 @@ func (ev *Event) GetSignalTargetInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) } -// GetSignalTargetInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent)) -} - // GetSignalTargetParentCmdargv returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetParentCmdargv() []string { if ev.GetEventType().String() != "signal" { @@ -2307,20 +1903,6 @@ func (ev 
*Event) GetSignalTargetParentFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent) } -// GetSignalTargetParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if ev.Signal.Target.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent)) -} - // GetSignalTargetParentGid returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetParentGid() uint32 { if ev.GetEventType().String() != "signal" { @@ -2375,20 +1957,6 @@ func (ev *Event) GetSignalTargetParentInterpreterFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) } -// GetSignalTargetParentInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if ev.Signal.Target.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent)) -} - // GetSignalTargetParentPid returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetParentPid() uint32 { if ev.GetEventType().String() != "signal" { @@ -2509,14 +2077,6 @@ func (ev *Event) GetSpliceFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File) } -// GetSpliceFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFilePathLength() int { - if ev.GetEventType().String() != "splice" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File)) -} - // GetTimestamp returns the value of the field, resolving if necessary 
func (ev *Event) GetTimestamp() time.Time { return ev.FieldHandlers.ResolveEventTime(ev, &ev.BaseEvent) @@ -2530,14 +2090,6 @@ func (ev *Event) GetUnlinkFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File) } -// GetUnlinkFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFilePathLength() int { - if ev.GetEventType().String() != "unlink" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File)) -} - // GetUtimesFilePath returns the value of the field, resolving if necessary func (ev *Event) GetUtimesFilePath() string { if ev.GetEventType().String() != "utimes" { @@ -2545,11 +2097,3 @@ func (ev *Event) GetUtimesFilePath() string { } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File) } - -// GetUtimesFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFilePathLength() int { - if ev.GetEventType().String() != "utimes" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File)) -} diff --git a/pkg/security/secl/model/field_accessors_windows.go b/pkg/security/secl/model/field_accessors_windows.go index 7985ac407f831d..dc67e0e8641105 100644 --- a/pkg/security/secl/model/field_accessors_windows.go +++ b/pkg/security/secl/model/field_accessors_windows.go @@ -83,17 +83,6 @@ func (ev *Event) GetExecFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent) } -// GetExecFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecFilePathLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent)) -} - // GetExecPid returns the value of the field, resolving if necessary func (ev *Event) GetExecPid() uint32 { if ev.GetEventType().String() != "exec" { @@ -168,17 +157,6 @@ func (ev *Event) 
GetExitFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent) } -// GetExitFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitFilePathLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent)) -} - // GetExitPid returns the value of the field, resolving if necessary func (ev *Event) GetExitPid() uint32 { if ev.GetEventType().String() != "exit" { @@ -243,27 +221,6 @@ func (ev *Event) GetProcessAncestorsFilePath() []string { return values } -// GetProcessAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFilePathLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next(ctx) - } - return values -} - // GetProcessAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsPid() []uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -338,14 +295,6 @@ func (ev *Event) GetProcessFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) } -// GetProcessFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent)) -} - // GetProcessParentEnvp returns the value of the 
field, resolving if necessary func (ev *Event) GetProcessParentEnvp() []string { if ev.BaseEvent.ProcessContext == nil { @@ -374,17 +323,6 @@ func (ev *Event) GetProcessParentFilePath() string { return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) } -// GetProcessParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent)) -} - // GetProcessParentPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessParentPid() uint32 { if ev.BaseEvent.ProcessContext == nil { From d435477204ec0a6165f307e5eded9c142a8748e5 Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Thu, 30 Jan 2025 09:47:02 -0800 Subject: [PATCH 79/97] remove process connection rates (#33557) --- pkg/process/checks/net.go | 35 --------------------- pkg/process/checks/process.go | 43 ++------------------------ pkg/process/checks/process_nix_test.go | 4 +-- pkg/process/checks/process_rt.go | 8 ++--- pkg/process/checks/process_test.go | 22 +------------ 5 files changed, 7 insertions(+), 105 deletions(-) diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go index 169cf139ac9951..b8ff2a0ff54e7e 100644 --- a/pkg/process/checks/net.go +++ b/pkg/process/checks/net.go @@ -32,7 +32,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/subscriptions" ) const ( @@ -71,8 +70,6 @@ type ConnectionsCheck struct { serviceExtractor *parser.ServiceExtractor processData *ProcessData - processConnRatesTransmitter subscriptions.Transmitter[ProcessConnRates] - localresolver *resolver.LocalResolver wmeta 
workloadmeta.Component @@ -81,9 +78,6 @@ type ConnectionsCheck struct { sysprobeClient *http.Client } -// ProcessConnRates describes connection rates for processes -type ProcessConnRates map[int32]*model.ProcessNetworks - // Init initializes a ConnectionsCheck instance. func (c *ConnectionsCheck) Init(syscfg *SysProbeConfig, hostInfo *HostInfo, _ bool) error { c.hostInfo = hostInfo @@ -176,8 +170,6 @@ func (c *ConnectionsCheck) Run(nextGroupID func() int32, _ *RunOptions) (RunResu // Resolve the Raddr side of connections for local containers c.localresolver.Resolve(conns) - c.notifyProcessConnRates(c.config, conns) - log.Debugf("collected connections in %s", time.Since(start)) c.npCollector.ScheduleConns(conns.Conns, conns.Dns) @@ -236,33 +228,6 @@ func (c *ConnectionsCheck) getConnections() (*model.Connections, error) { return netEncoding.GetUnmarshaler(contentType).Unmarshal(body) } -func (c *ConnectionsCheck) notifyProcessConnRates(config pkgconfigmodel.Reader, conns *model.Connections) { - if len(c.processConnRatesTransmitter.Chs) == 0 { - return - } - - connCheckIntervalS := int(GetInterval(config, ConnectionsCheckName) / time.Second) - - connRates := make(ProcessConnRates) - for _, c := range conns.Conns { - rates, ok := connRates[c.Pid] - if !ok { - connRates[c.Pid] = &model.ProcessNetworks{ConnectionRate: 1, BytesRate: float32(c.LastBytesReceived) + float32(c.LastBytesSent)} - continue - } - - rates.BytesRate += float32(c.LastBytesSent) + float32(c.LastBytesReceived) - rates.ConnectionRate++ - } - - for _, rates := range connRates { - rates.BytesRate /= float32(connCheckIntervalS) - rates.ConnectionRate /= float32(connCheckIntervalS) - } - - c.processConnRatesTransmitter.Notify(connRates) -} - func convertDNSEntry(dnstable map[string]*model.DNSDatabaseEntry, namemap map[string]int32, namedb *[]string, ip string, entry *model.DNSEntry) { dbentry := &model.DNSDatabaseEntry{ NameOffsets: make([]int32, 0, len(entry.Names)), diff --git 
a/pkg/process/checks/process.go b/pkg/process/checks/process.go index add33a5f22d6ab..323de018caf6b8 100644 --- a/pkg/process/checks/process.go +++ b/pkg/process/checks/process.go @@ -16,7 +16,6 @@ import ( model "github.com/DataDog/agent-payload/v5/process" "github.com/shirou/gopsutil/v4/cpu" - "go.uber.org/atomic" "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" workloadmetacomp "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -32,7 +31,6 @@ import ( proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/subscriptions" ) const ( @@ -111,9 +109,6 @@ type ProcessCheck struct { checkCount uint32 skipAmount uint32 - lastConnRates *atomic.Pointer[ProcessConnRates] - connRatesReceiver subscriptions.Receiver[ProcessConnRates] - //nolint:revive // TODO(PROC) Fix revive linter lookupIdProbe *LookupIdProbe @@ -170,8 +165,6 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.ignoreZombieProcesses = p.config.GetBool(configIgnoreZombies) - p.initConnRates() - p.extractors = append(p.extractors, p.serviceExtractor) if !oneShot && workloadmeta.Enabled(p.config) { @@ -191,33 +184,6 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool return nil } -func (p *ProcessCheck) initConnRates() { - p.lastConnRates = atomic.NewPointer[ProcessConnRates](nil) - p.connRatesReceiver = subscriptions.NewReceiver[ProcessConnRates]() - - go p.updateConnRates() -} - -func (p *ProcessCheck) updateConnRates() { - for { - connRates, ok := <-p.connRatesReceiver.Ch - if !ok { - return - } - p.lastConnRates.Store(&connRates) - } -} - -func (p *ProcessCheck) getLastConnRates() ProcessConnRates { - if p.lastConnRates == nil { - return nil - } - if result := p.lastConnRates.Load(); result != nil { - return *result - } - return nil -} - // IsEnabled 
returns true if the check is enabled by configuration func (p *ProcessCheck) IsEnabled() bool { if p.config.GetBool("process_config.run_in_core_agent.enabled") && flavor.GetFlavor() == flavor.ProcessAgent { @@ -319,8 +285,7 @@ func (p *ProcessCheck) run(groupID int32, collectRealTime bool) (RunResult, erro collectorProcHints := p.generateHints() p.checkCount++ - connsRates := p.getLastConnRates() - procsByCtr := fmtProcesses(p.scrubber, p.disallowList, procs, p.lastProcs, pidToCid, cpuTimes[0], p.lastCPUTime, p.lastRun, connsRates, p.lookupIdProbe, p.ignoreZombieProcesses, p.serviceExtractor) + procsByCtr := fmtProcesses(p.scrubber, p.disallowList, procs, p.lastProcs, pidToCid, cpuTimes[0], p.lastCPUTime, p.lastRun, p.lookupIdProbe, p.ignoreZombieProcesses, p.serviceExtractor) messages, totalProcs, totalContainers := createProcCtrMessages(p.hostInfo, procsByCtr, containers, p.maxBatchSize, p.maxBatchBytes, groupID, p.networkID, collectorProcHints) // Store the last state for comparison on the next run. 
@@ -337,7 +302,7 @@ func (p *ProcessCheck) run(groupID int32, collectRealTime bool) (RunResult, erro if p.realtimeLastProcs != nil { // TODO: deduplicate chunking with RT collection - chunkedStats := fmtProcessStats(p.maxBatchSize, stats, p.realtimeLastProcs, pidToCid, cpuTimes[0], p.realtimeLastCPUTime, p.realtimeLastRun, connsRates) + chunkedStats := fmtProcessStats(p.maxBatchSize, stats, p.realtimeLastProcs, pidToCid, cpuTimes[0], p.realtimeLastCPUTime, p.realtimeLastRun) groupSize := len(chunkedStats) chunkedCtrStats := convertAndChunkContainers(containers, groupSize) @@ -487,7 +452,6 @@ func fmtProcesses( ctrByProc map[int]string, syst2, syst1 cpu.TimesStat, lastRun time.Time, - connRates ProcessConnRates, //nolint:revive // TODO(PROC) Fix revive linter lookupIdProbe *LookupIdProbe, zombiesIgnored bool, @@ -519,9 +483,6 @@ func fmtProcesses( ProcessContext: serviceExtractor.GetServiceContext(fp.Pid), } - if connRates != nil { - proc.Networks = connRates[fp.Pid] - } _, ok := procsByCtr[proc.ContainerId] if !ok { procsByCtr[proc.ContainerId] = make([]*model.Process, 0) diff --git a/pkg/process/checks/process_nix_test.go b/pkg/process/checks/process_nix_test.go index c819e7fddebd7d..ee03521cb4ecac 100644 --- a/pkg/process/checks/process_nix_test.go +++ b/pkg/process/checks/process_nix_test.go @@ -124,7 +124,7 @@ func TestBasicProcessMessages(t *testing.T) { useWindowsServiceName := true useImprovedAlgorithm := false ex := parser.NewServiceExtractor(serviceExtractorEnabled, useWindowsServiceName, useImprovedAlgorithm) - procs := fmtProcesses(procutil.NewDefaultDataScrubber(), disallowList, tc.processes, tc.processes, tc.pidToCid, syst2, syst1, lastRun, nil, nil, false, ex) + procs := fmtProcesses(procutil.NewDefaultDataScrubber(), disallowList, tc.processes, tc.processes, tc.pidToCid, syst2, syst1, lastRun, nil, false, ex) messages, totalProcs, totalContainers := createProcCtrMessages(hostInfo, procs, tc.containers, tc.maxSize, maxBatchBytes, int32(i), "nid", 0) 
assert.Equal(t, tc.expectedChunks, len(messages)) @@ -238,7 +238,7 @@ func TestContainerProcessChunking(t *testing.T) { useWindowsServiceName := true useImprovedAlgorithm := false ex := parser.NewServiceExtractor(serviceExtractorEnabled, useWindowsServiceName, useImprovedAlgorithm) - processes := fmtProcesses(procutil.NewDefaultDataScrubber(), nil, procsByPid, procsByPid, pidToCid, syst2, syst1, lastRun, nil, nil, false, ex) + processes := fmtProcesses(procutil.NewDefaultDataScrubber(), nil, procsByPid, procsByPid, pidToCid, syst2, syst1, lastRun, nil, false, ex) messages, totalProcs, totalContainers := createProcCtrMessages(hostInfo, processes, ctrs, tc.maxSize, maxBatchBytes, int32(i), "nid", 0) assert.Equal(t, tc.expectedProcCount, totalProcs) diff --git a/pkg/process/checks/process_rt.go b/pkg/process/checks/process_rt.go index e218a80fa732e5..9f5297bd13734b 100644 --- a/pkg/process/checks/process_rt.go +++ b/pkg/process/checks/process_rt.go @@ -19,7 +19,7 @@ import ( ) // runRealtime runs the realtime ProcessCheck to collect statistics about the running processes. 
-// Underying procutil.Probe is responsible for the actual implementation +// Underlying procutil.Probe is responsible for the actual implementation func (p *ProcessCheck) runRealtime(groupID int32) (RunResult, error) { cpuTimes, err := cpu.Times(false) if err != nil { @@ -62,7 +62,7 @@ func (p *ProcessCheck) runRealtime(groupID int32) (RunResult, error) { return CombinedRunResult{}, nil } - chunkedStats := fmtProcessStats(p.maxBatchSize, procs, p.realtimeLastProcs, pidToCid, cpuTimes[0], p.realtimeLastCPUTime, p.realtimeLastRun, p.getLastConnRates()) + chunkedStats := fmtProcessStats(p.maxBatchSize, procs, p.realtimeLastProcs, pidToCid, cpuTimes[0], p.realtimeLastCPUTime, p.realtimeLastRun) groupSize := len(chunkedStats) chunkedCtrStats := convertAndChunkContainers(containers, groupSize) @@ -96,7 +96,6 @@ func fmtProcessStats( pidToCid map[int]string, syst2, syst1 cpu.TimesStat, lastRun time.Time, - connRates ProcessConnRates, ) [][]*model.ProcessStat { chunked := make([][]*model.ProcessStat, 0) chunk := make([]*model.ProcessStat, 0, maxBatchSize) @@ -134,9 +133,6 @@ func fmtProcessStats( InvoluntaryCtxSwitches: uint64(fp.CtxSwitches.Involuntary), ContainerId: pidToCid[int(pid)], } - if connRates != nil { - stat.Networks = connRates[pid] - } chunk = append(chunk, stat) diff --git a/pkg/process/checks/process_test.go b/pkg/process/checks/process_test.go index d306809c51967d..30250de7a2f3a4 100644 --- a/pkg/process/checks/process_test.go +++ b/pkg/process/checks/process_test.go @@ -34,7 +34,6 @@ import ( metricsmock "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/subscriptions" ) func processCheckWithMockProbe(t *testing.T) (*ProcessCheck, *mocks.Probe) { @@ -386,25 +385,6 @@ func TestDisallowList(t *testing.T) { } } -func TestConnRates(t *testing.T) { - p := &ProcessCheck{} - - 
p.initConnRates() - - var transmitter subscriptions.Transmitter[ProcessConnRates] - transmitter.Chs = append(transmitter.Chs, p.connRatesReceiver.Ch) - - rates := ProcessConnRates{ - 1: &model.ProcessNetworks{}, - } - transmitter.Notify(rates) - - close(p.connRatesReceiver.Ch) - - assert.Eventually(t, func() bool { return p.getLastConnRates() != nil }, 10*time.Second, time.Millisecond) - assert.Equal(t, rates, p.getLastConnRates()) -} - func TestProcessCheckHints(t *testing.T) { processCheck, probe := processCheckWithMockProbe(t) @@ -476,7 +456,7 @@ func TestProcessWithNoCommandline(t *testing.T) { useWindowsServiceName := true useImprovedAlgorithm := false serviceExtractor := parser.NewServiceExtractor(serviceExtractorEnabled, useWindowsServiceName, useImprovedAlgorithm) - procs := fmtProcesses(procutil.NewDefaultDataScrubber(), disallowList, procMap, procMap, nil, syst2, syst1, lastRun, nil, nil, false, serviceExtractor) + procs := fmtProcesses(procutil.NewDefaultDataScrubber(), disallowList, procMap, procMap, nil, syst2, syst1, lastRun, nil, false, serviceExtractor) assert.Len(t, procs, 1) require.Len(t, procs[""], 1) From fdd0b7b812508d2176b50162920bc4f6d43b88db Mon Sep 17 00:00:00 2001 From: Vickenty Fesunov Date: Thu, 30 Jan 2025 18:47:11 +0100 Subject: [PATCH 80/97] AMLII-2255 Set SO_PASSCRED before binding the socket (#33533) Co-authored-by: Janine Chan <64388808+janine-c@users.noreply.github.com> --- comp/dogstatsd/listeners/uds_common.go | 25 ++++++----------- comp/dogstatsd/listeners/uds_datagram.go | 28 +++++++++++++------ comp/dogstatsd/listeners/uds_linux.go | 10 ++----- comp/dogstatsd/listeners/uds_nolinux.go | 5 ++-- comp/dogstatsd/listeners/uds_stream.go | 22 +++++++++++++-- .../notes/amlii-2255-670a004a90c8f786.yaml | 3 ++ 6 files changed, 55 insertions(+), 38 deletions(-) create mode 100644 releasenotes/notes/amlii-2255-670a004a90c8f786.yaml diff --git a/comp/dogstatsd/listeners/uds_common.go b/comp/dogstatsd/listeners/uds_common.go index 
f523bdacf54c3d..9bc99659d75c68 100644 --- a/comp/dogstatsd/listeners/uds_common.go +++ b/comp/dogstatsd/listeners/uds_common.go @@ -96,22 +96,17 @@ type netUnixConn interface { // CloseFunction is a function that closes a connection type CloseFunction func(unixConn netUnixConn) error -func setupUnixConn(conn netUnixConn, originDetection bool, config model.Reader) (bool, error) { +func setupUnixConn(conn syscall.RawConn, originDetection bool, address string) (bool, error) { if originDetection { err := enableUDSPassCred(conn) if err != nil { log.Errorf("dogstatsd-uds: error enabling origin detection: %s", err) originDetection = false } else { - log.Debugf("dogstatsd-uds: enabling origin detection on %s", conn.LocalAddr()) + log.Debugf("dogstatsd-uds: enabling origin detection on %s", address) } } - if rcvbuf := config.GetInt("dogstatsd_so_rcvbuf"); rcvbuf != 0 { - if err := conn.SetReadBuffer(rcvbuf); err != nil { - return originDetection, fmt.Errorf("could not set socket rcvbuf: %s", err) - } - } return originDetection, nil } @@ -150,9 +145,7 @@ func NewUDSOobPoolManager() *packets.PoolManager[[]byte] { } // NewUDSListener returns an idle UDS Statsd listener -func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, transport string, wmeta option.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { - originDetection := cfg.GetBool("dogstatsd_origin_detection") - +func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, transport string, wmeta option.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore 
*TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component, originDetection bool) (*UDSListener, error) { listener := &UDSListener{ OriginDetection: originDetection, packetOut: packetOut, @@ -212,12 +205,6 @@ func (l *UDSListener) handleConnection(conn netUnixConn, closeFunc CloseFunction l.telemetryStore.tlmUDSConnections.Dec(tlmListenerID, l.transport) }() - var err error - l.OriginDetection, err = setupUnixConn(conn, l.OriginDetection, l.config) - if err != nil { - return err - } - t1 := time.Now() var t2 time.Time log.Debugf("dogstatsd-uds: starting to handle %s", conn.LocalAddr()) @@ -234,6 +221,12 @@ func (l *UDSListener) handleConnection(conn netUnixConn, closeFunc CloseFunction } } + if rcvbuf := l.config.GetInt("dogstatsd_so_rcvbuf"); rcvbuf != 0 { + if err := conn.SetReadBuffer(rcvbuf); err != nil { + log.Warnf("could not set socket rcvbuf: %s", err) + } + } + for { var n int var oobn int diff --git a/comp/dogstatsd/listeners/uds_datagram.go b/comp/dogstatsd/listeners/uds_datagram.go index 9a5c220a4f214e..d0b9516307648a 100644 --- a/comp/dogstatsd/listeners/uds_datagram.go +++ b/comp/dogstatsd/listeners/uds_datagram.go @@ -6,8 +6,10 @@ package listeners import ( + "context" "fmt" "net" + "syscall" "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -31,22 +33,36 @@ func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolMana socketPath := cfg.GetString("dogstatsd_socket") transport := "unixgram" - address, err := setupSocketBeforeListen(socketPath, transport) + _, err := setupSocketBeforeListen(socketPath, transport) if err != nil { return nil, err } - conn, err := net.ListenUnixgram(transport, address) + originDetection := cfg.GetBool("dogstatsd_origin_detection") + + conf := net.ListenConfig{ + Control: func(_, address string, c syscall.RawConn) (err error) { + originDetection, err = setupUnixConn(c, originDetection, 
address) + return + }, + } + + connGeneric, err := conf.ListenPacket(context.Background(), transport, socketPath) if err != nil { return nil, fmt.Errorf("can't listen: %s", err) } + conn, ok := connGeneric.(*net.UnixConn) + if !ok { + return nil, fmt.Errorf("unexpected return type from ListenPacket, expected UnixConn: %#v", connGeneric) + } + err = setSocketWriteOnly(socketPath) if err != nil { return nil, err } - l, err := NewUDSListener(packetOut, sharedPacketPoolManager, sharedOobPoolManager, cfg, capture, transport, wmeta, pidMap, telemetryStore, packetsTelemetryStore, telemetryComponent) + l, err := NewUDSListener(packetOut, sharedPacketPoolManager, sharedOobPoolManager, cfg, capture, transport, wmeta, pidMap, telemetryStore, packetsTelemetryStore, telemetryComponent, originDetection) if err != nil { return nil, err } @@ -56,12 +72,6 @@ func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolMana conn: conn, } - // Setup origin detection early - l.OriginDetection, err = setupUnixConn(conn, l.OriginDetection, l.config) - if err != nil { - return nil, err - } - log.Infof("dogstatsd-uds: %s successfully initialized", conn.LocalAddr()) return listener, nil } diff --git a/comp/dogstatsd/listeners/uds_linux.go b/comp/dogstatsd/listeners/uds_linux.go index cdb728cb9085c8..1abfbcd0a30d70 100644 --- a/comp/dogstatsd/listeners/uds_linux.go +++ b/comp/dogstatsd/listeners/uds_linux.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "strconv" + "syscall" "time" "golang.org/x/sys/unix" @@ -40,14 +41,9 @@ func getUDSAncillarySize() int { // enableUDSPassCred enables credential passing from the kernel for origin detection. // That flag can be ignored if origin dection is disabled. 
-func enableUDSPassCred(conn netUnixConn) error { - rawconn, err := conn.SyscallConn() - if err != nil { - return err - } - +func enableUDSPassCred(rawconn syscall.RawConn) error { var e error - err = rawconn.Control(func(fd uintptr) { + err := rawconn.Control(func(fd uintptr) { e = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_PASSCRED, 1) }) if err != nil { diff --git a/comp/dogstatsd/listeners/uds_nolinux.go b/comp/dogstatsd/listeners/uds_nolinux.go index 4acb1755730c90..86331fffa9cfbc 100644 --- a/comp/dogstatsd/listeners/uds_nolinux.go +++ b/comp/dogstatsd/listeners/uds_nolinux.go @@ -9,6 +9,7 @@ package listeners import ( "errors" + "syscall" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" @@ -25,9 +26,7 @@ func getUDSAncillarySize() int { } // enableUDSPassCred returns a "not implemented" error on non-linux hosts -// -//nolint:revive // TODO(AML) Fix revive linter -func enableUDSPassCred(_ netUnixConn) error { +func enableUDSPassCred(_ syscall.RawConn) error { return ErrLinuxOnly } diff --git a/comp/dogstatsd/listeners/uds_stream.go b/comp/dogstatsd/listeners/uds_stream.go index d259cac1d80985..a5c6a8c1dda1e2 100644 --- a/comp/dogstatsd/listeners/uds_stream.go +++ b/comp/dogstatsd/listeners/uds_stream.go @@ -6,9 +6,11 @@ package listeners import ( + "context" "fmt" "net" "strings" + "syscall" "time" "github.com/DataDog/datadog-agent/comp/core/telemetry" @@ -33,22 +35,36 @@ func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManage socketPath := cfg.GetString("dogstatsd_stream_socket") transport := "unix" - address, err := setupSocketBeforeListen(socketPath, transport) + _, err := setupSocketBeforeListen(socketPath, transport) if err != nil { return nil, err } - conn, err := net.ListenUnix(transport, address) + originDetection := cfg.GetBool("dogstatsd_origin_detection") + + conf := net.ListenConfig{ + Control: func(_, address string, c 
syscall.RawConn) (err error) { + originDetection, err = setupUnixConn(c, originDetection, address) + return + }, + } + + unixListener, err := conf.Listen(context.Background(), transport, socketPath) if err != nil { return nil, fmt.Errorf("can't listen: %s", err) } + conn, ok := unixListener.(*net.UnixListener) + if !ok { + return nil, fmt.Errorf("unexpected return type from Listen, expected UnixListener: %#v", unixListener) + } + err = setSocketWriteOnly(socketPath) if err != nil { return nil, err } - l, err := NewUDSListener(packetOut, sharedPacketPoolManager, sharedOobPacketPoolManager, cfg, capture, transport, wmeta, pidMap, telemetryStore, packetsTelemetryStore, telemetry) + l, err := NewUDSListener(packetOut, sharedPacketPoolManager, sharedOobPacketPoolManager, cfg, capture, transport, wmeta, pidMap, telemetryStore, packetsTelemetryStore, telemetry, originDetection) if err != nil { return nil, err } diff --git a/releasenotes/notes/amlii-2255-670a004a90c8f786.yaml b/releasenotes/notes/amlii-2255-670a004a90c8f786.yaml new file mode 100644 index 00000000000000..b84995f9cb1996 --- /dev/null +++ b/releasenotes/notes/amlii-2255-670a004a90c8f786.yaml @@ -0,0 +1,3 @@ +fixes: + - | + Fixed a bug in the DogStatsD Unix socket server that caused metrics to miss container tags and the Agent to report ``matched PID for the process is 0`` warnings.
From 84718dfd24173209096229810a65bf720b9850e3 Mon Sep 17 00:00:00 2001 From: Duncan Harvey <35278470+duncanpharvey@users.noreply.github.com> Date: Thu, 30 Jan 2025 12:47:18 -0500 Subject: [PATCH 81/97] Add /dd_tracer/node/node_modules to NODE_PATH for serverless-init (#33589) --- cmd/serverless-init/mode/initcontainer_mode.go | 3 ++- cmd/serverless-init/mode/initcontainer_mode_test.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/serverless-init/mode/initcontainer_mode.go b/cmd/serverless-init/mode/initcontainer_mode.go index 9d45da246cf8d4..06d2d0c2d512ad 100644 --- a/cmd/serverless-init/mode/initcontainer_mode.go +++ b/cmd/serverless-init/mode/initcontainer_mode.go @@ -98,7 +98,8 @@ type Tracer struct { func instrumentNode() { currNodePath := os.Getenv("NODE_PATH") - os.Setenv("NODE_PATH", addToString(currNodePath, ":", "/dd_tracer/node/")) + legacyDatadogNodePath := addToString(currNodePath, ":", "/dd_tracer/node/") + os.Setenv("NODE_PATH", addToString(legacyDatadogNodePath, ":", "/dd_tracer/node/node_modules")) currNodeOptions := os.Getenv("NODE_OPTIONS") os.Setenv("NODE_OPTIONS", addToString(currNodeOptions, " ", "--require dd-trace/init")) diff --git a/cmd/serverless-init/mode/initcontainer_mode_test.go b/cmd/serverless-init/mode/initcontainer_mode_test.go index fe4efdfda1b2ee..ec6286fe44096a 100644 --- a/cmd/serverless-init/mode/initcontainer_mode_test.go +++ b/cmd/serverless-init/mode/initcontainer_mode_test.go @@ -90,7 +90,7 @@ func TestNodeTracerIsAutoInstrumented(t *testing.T) { autoInstrumentTracer(fs) assert.Equal(t, "--require dd-trace/init", os.Getenv("NODE_OPTIONS")) - assert.Equal(t, "/dd_tracer/node/", os.Getenv("NODE_PATH")) + assert.Equal(t, "/dd_tracer/node/:/dd_tracer/node/node_modules", os.Getenv("NODE_PATH")) } func TestDotNetTracerIsAutoInstrumented(t *testing.T) { From 78991f6b4f90368bb9b7d51816beb5dd16b89d4a Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Thu, 30 Jan 2025 20:35:40 +0100 Subject: [PATCH 
82/97] [CWS] fix SetFieldValue test and remove Error from context (#33420) --- .../generators/accessors/accessors.go | 65 +- .../generators/accessors/accessors.tmpl | 23 +- .../generators/accessors/common/types.go | 2 + pkg/security/probe/discarders_linux.go | 4 +- pkg/security/resolvers/dentry/resolver.go | 15 +- pkg/security/secl/compiler/eval/context.go | 9 +- pkg/security/secl/model/accessors_unix.go | 2816 +++++++---------- pkg/security/secl/model/accessors_windows.go | 44 +- pkg/security/secl/model/iterator.go | 12 +- pkg/security/secl/model/model.go | 11 +- pkg/security/secl/model/model_helpers_unix.go | 8 + pkg/security/secl/model/model_test.go | 4 +- pkg/security/secl/model/model_unix.go | 26 +- pkg/security/secl/model/model_windows.go | 5 + pkg/security/seclwin/model/accessors_win.go | 44 +- pkg/security/seclwin/model/iterator.go | 12 +- pkg/security/seclwin/model/model.go | 11 +- pkg/security/seclwin/model/model_win.go | 5 + pkg/security/serializers/serializers_linux.go | 4 +- pkg/security/tests/main_linux.go | 1 + pkg/security/tests/process_test.go | 38 +- pkg/security/tests/schemas.go | 3 +- 22 files changed, 1402 insertions(+), 1760 deletions(-) diff --git a/pkg/security/generators/accessors/accessors.go b/pkg/security/generators/accessors/accessors.go index 7975e8351358f0..b7ae180325cc26 100644 --- a/pkg/security/generators/accessors/accessors.go +++ b/pkg/security/generators/accessors/accessors.go @@ -233,7 +233,11 @@ func handleEmbedded(module *common.Module, name, prefix, event string, restricte } // handleNonEmbedded adds non-embedded fields to list of all possible (but not necessarily exposed) SECL fields of the module -func handleNonEmbedded(module *common.Module, field seclField, prefixedFieldName, event string, restrictedTo []string, fieldType string, isPointer, isArray bool) { +func handleNonEmbedded(module *common.Module, field seclField, aliasPrefix, alias, prefixedFieldName, event string, restrictedTo []string, fieldType string, 
isPointer, isArray bool) { + if aliasPrefix != "" { + alias = aliasPrefix + "." + alias + } + module.AllFields[prefixedFieldName] = &common.StructField{ Name: prefixedFieldName, Event: event, @@ -242,6 +246,9 @@ func handleNonEmbedded(module *common.Module, field seclField, prefixedFieldName IsArray: isArray, Check: field.check, RestrictedTo: restrictedTo, + SetHandler: field.setHandler, + AliasPrefix: aliasPrefix, + Alias: alias, } } @@ -283,8 +290,10 @@ func handleIterator(module *common.Module, field seclField, fieldType, iterator, Helper: field.helper, SkipADResolution: field.skipADResolution, Check: field.check, + SetHandler: field.setHandler, Ref: field.ref, RestrictedTo: restrictedTo, + ReadOnly: field.readOnly, } lengthField := addLengthOpField(module, alias, module.Iterators[alias]) @@ -324,12 +333,14 @@ func handleFieldWithHandler(module *common.Module, field seclField, aliasPrefix, SkipADResolution: field.skipADResolution, IsOrigTypePtr: isPointer, Check: field.check, + SetHandler: field.setHandler, Alias: alias, AliasPrefix: aliasPrefix, GettersOnly: field.gettersOnly, GenGetters: field.genGetters, Ref: field.ref, RestrictedTo: restrictedTo, + ReadOnly: field.readOnly, } module.Fields[alias] = newStructField @@ -379,11 +390,13 @@ type seclField struct { lengthField bool weight int64 check string + setHandler string exposedAtEventRootOnly bool // fields that should only be exposed at the root of an event, i.e. 
`parent` should not be exposed for an `ancestor` of a process containerStructName string gettersOnly bool // a field that is not exposed via SECL, but still has an accessor generated genGetters bool ref string + readOnly bool } func parseFieldDef(def string) (seclField, error) { @@ -418,6 +431,8 @@ func parseFieldDef(def string) (seclField, error) { field.iterator = value case "check": field.check = value + case "set_handler": + field.setHandler = value case "opts": for _, opt := range strings.Split(value, "|") { switch opt { @@ -434,6 +449,8 @@ func parseFieldDef(def string) (seclField, error) { field.exposedAtEventRootOnly = true case "gen_getters": field.genGetters = true + case "readonly": + field.readOnly = true } } } @@ -542,7 +559,9 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa } for _, seclField := range fields { - handleNonEmbedded(module, seclField, prefixedFieldName, fieldEvent, restrictedTo, fieldType, isPointer, isArray) + alias := seclField.name + + handleNonEmbedded(module, seclField, aliasPrefix, alias, prefixedFieldName, fieldEvent, restrictedTo, fieldType, isPointer, isArray) if seclFieldIterator := seclField.iterator; seclFieldIterator != "" { fieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, fieldEvent, restrictedTo, fieldCommentText, opOverrides, isPointer, isArray) @@ -565,7 +584,6 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa continue } - alias := seclField.name if isBasicType(fieldType) { handleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, fieldEvent, restrictedTo, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray) } else { @@ -592,7 +610,9 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa } } for _, seclField := range gettersOnlyFields { - handleNonEmbedded(module, seclField, prefixedFieldName, 
fieldEvent, restrictedTo, fieldType, isPointer, isArray) + alias := seclField.name + + handleNonEmbedded(module, seclField, aliasPrefix, alias, prefixedFieldName, fieldEvent, restrictedTo, fieldType, isPointer, isArray) if seclFieldIterator := seclField.iterator; seclFieldIterator != "" { fieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, fieldEvent, restrictedTo, fieldCommentText, opOverrides, isPointer, isArray) @@ -615,7 +635,6 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa continue } - alias := seclField.name if isBasicTypeForGettersOnly(fieldType) { handleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, fieldEvent, restrictedTo, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray) } else { @@ -955,6 +974,36 @@ func getChecks(allFields map[string]*common.StructField, field *common.StructFie return checks } +func getSetHandler(allFields map[string]*common.StructField, field *common.StructField) string { + var handler string + + fqn := field.Alias + + name := field.Name + for name != "" { + field := allFields[name] + if field == nil { + break + } + + if field.SetHandler != "" { + ptr := "" + if !field.IsOrigTypePtr { + ptr = "&" + } + return fmt.Sprintf(`%s(%sev.%s, "%s", value)`, field.SetHandler, ptr, field.Name, strings.Replace(fqn, field.Alias+".", "", -1)) + } + + idx := strings.LastIndex(name, ".") + if idx == -1 { + break + } + name = name[:idx] + } + + return handler +} + func getHandlers(allFields map[string]*common.StructField) map[string]string { handlers := make(map[string]string) @@ -1022,6 +1071,10 @@ func getFieldReflectType(field *common.StructField) string { return "" } +func isReadOnly(field *common.StructField) bool { + return field.IsLength || field.Helper || field.ReadOnly +} + var funcMap = map[string]interface{}{ "TrimPrefix": strings.TrimPrefix, "TrimSuffix": strings.TrimSuffix, @@ 
-1039,6 +1092,8 @@ var funcMap = map[string]interface{}{ "AddSuffixToFuncPrototype": addSuffixToFuncPrototype, "GetFieldRestrictions": getFieldRestrictions, "GetFieldReflectType": getFieldReflectType, + "GetSetHandler": getSetHandler, + "IsReadOnly": isReadOnly, } //go:embed accessors.tmpl diff --git a/pkg/security/generators/accessors/accessors.tmpl b/pkg/security/generators/accessors/accessors.tmpl index 040ded33fa8ab4..9500a0091b99db 100644 --- a/pkg/security/generators/accessors/accessors.tmpl +++ b/pkg/security/generators/accessors/accessors.tmpl @@ -67,13 +67,12 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{- if and $Field.Iterator (not $Field.IsIterator) }} EvalFnc: func(ctx *eval.Context) []{{$Field.ReturnType}} { ctx.AppendResolvedField(field) - {{if $Field.Handler}} - ev := ctx.Event.(*Event) - {{end}} {{$Checks := $Field | GetChecks $.AllFields}} - iterator := &{{$Field.Iterator.ReturnType}}{} + ev := ctx.Event.(*Event) + + iterator := &{{$Field.Iterator.ReturnType}}{Root: ev.{{$Field.Iterator.Name}}} if regID != "" { {{if $Field.Iterator.IsOrigTypePtr}} @@ -94,7 +93,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{$SubName := $Field.Iterator.Name | TrimPrefix $Check}} {{$Check = $SubName | printf "element%s"}} if !{{$Check}}() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []{{$Field.ReturnType}}{ {{$Field.GetDefaultScalarReturnValue}} } } {{end}} @@ -144,7 +142,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{$SubName := $Field.Iterator.Name | TrimPrefix $Check}} {{$Check = $SubName | printf "current%s"}} if !{{$Check}}() { - ctx.Error = &eval.ErrNotSupported{Field: field} {{if $Field.GetArrayPrefix}} return nil {{else}} @@ -192,7 +189,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{range $Check := $Checks}} {{$Check = $Check | printf "ev.%s"}} if !{{$Check}}() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return {{$Field.GetDefaultReturnValue}} } {{end}} @@ -283,9 +279,6 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { ctx := eval.NewContext(ev) value := evaluator.Eval(ctx) - if ctx.Error != nil { - return nil, ctx.Error - } return value, nil } @@ -319,11 +312,19 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { {{end}} {{$FieldName := $Field | BuildFirstAccessor $.AllFields}} + {{$SetHandler := $Field | GetSetHandler $.AllFields}} + case "{{$Name}}": {{- $Field | NewField $.AllFields}} - {{if $Field.IsLength}} + {{if $Field | IsReadOnly}} return &eval.ErrFieldReadOnly{Field: "{{$Name}}"} {{else}} + {{if ne $SetHandler ""}} + cont, err := {{$SetHandler}} + if err != nil || !cont { + return err + } + {{end}} {{- if eq $Field.BasicType "string"}} {{- if $Field.IsArray}} switch rv := value.(type) { diff --git a/pkg/security/generators/accessors/common/types.go b/pkg/security/generators/accessors/common/types.go index b942e90f892711..00fdcd1c581a11 100644 --- a/pkg/security/generators/accessors/common/types.go +++ b/pkg/security/generators/accessors/common/types.go @@ -65,6 +65,7 @@ type StructField struct { CommentText string OpOverrides string Check string + SetHandler string Alias string AliasPrefix string GettersOnly bool @@ -72,6 +73,7 @@ type StructField struct { Ref string RestrictedTo []string IsIterator bool + ReadOnly bool } // GetEvaluatorType returns the evaluator type name diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go index ad34db42a0eb9d..0b997b68805470 100644 --- a/pkg/security/probe/discarders_linux.go +++ b/pkg/security/probe/discarders_linux.go @@ -400,7 +400,7 @@ func (id *inodeDiscarders) discardParentInode(req *erpc.Request, rs *rules.RuleS for i := 0; i < discarderDepth; i++ { key, err := id.dentryResolver.GetParent(parentKey) - if err != nil || dentry.IsFakeInode(pathKey.Inode) { + if err != nil || 
model.IsFakeInode(pathKey.Inode) { if i == 0 { return false, 0, 0, err } @@ -451,7 +451,7 @@ func filenameDiscarderWrapper(eventType model.EventType, getter inodeEventGetter isDiscarded, _, parentInode, err := probe.inodeDiscarders.discardParentInode(probe.erpcRequest, rs, eventType, field, filename, fileEvent.PathKey, event.TimestampRaw) if !isDiscarded && !isDeleted && err == nil { - if !dentry.IsFakeInode(fileEvent.PathKey.Inode) { + if !model.IsFakeInode(fileEvent.PathKey.Inode) { seclog.Tracef("Apply `%s.file.path` inode discarder for event `%s`, inode: %d(%s)", eventType, eventType, fileEvent.PathKey.Inode, filename) // not able to discard the parent then only discard the filename diff --git a/pkg/security/resolvers/dentry/resolver.go b/pkg/security/resolvers/dentry/resolver.go index 61cb7672999ca6..551706d4629501 100644 --- a/pkg/security/resolvers/dentry/resolver.go +++ b/pkg/security/resolvers/dentry/resolver.go @@ -36,10 +36,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/utils/cache" ) -var ( - fakeInodeMSW = uint64(0xdeadc001) -) - type counterEntry struct { resolutionType string resolution string @@ -126,11 +122,6 @@ func allERPCRet() []eRPCRet { return []eRPCRet{eRPCok, eRPCCacheMiss, eRPCBufferSize, eRPCWritePageFault, eRPCTailCallError, eRPCReadPageFault, eRPCUnknownError} } -// IsFakeInode returns whether the given inode is a fake inode -func IsFakeInode(inode uint64) bool { - return inode>>32 == fakeInodeMSW -} - // SendStats sends the dentry resolver metrics func (dr *Resolver) SendStats() error { for counterEntry, counter := range dr.hitsCounters { @@ -251,7 +242,7 @@ func (dr *Resolver) ResolveNameFromMap(pathKey model.PathKey) (string, error) { name := pathLeaf.GetName() - if !IsFakeInode(pathKey.Inode) { + if !model.IsFakeInode(pathKey.Inode) { cacheEntry := newPathEntry(pathLeaf.Parent, name) dr.cacheInode(pathKey, cacheEntry) } @@ -380,7 +371,7 @@ func (dr *Resolver) ResolveFromMap(pathKey model.PathKey, cache bool) (string, e 
} // do not cache fake path keys in the case of rename events - if !IsFakeInode(pathKey.Inode) && cache { + if !model.IsFakeInode(pathKey.Inode) && cache { dr.keys = append(dr.keys, pathKey) dr.cacheNameEntries = append(dr.cacheNameEntries, name) } @@ -546,7 +537,7 @@ func (dr *Resolver) ResolveFromERPC(pathKey model.PathKey, cache bool) (string, dr.filenameParts = append(dr.filenameParts, segment) i += len(segment) + 1 - if !IsFakeInode(pathKey.Inode) && cache { + if !model.IsFakeInode(pathKey.Inode) && cache { dr.keys = append(dr.keys, pathKey) dr.cacheNameEntries = append(dr.cacheNameEntries, segment) } diff --git a/pkg/security/secl/compiler/eval/context.go b/pkg/security/secl/compiler/eval/context.go index d1eb055aef48f8..89d1b055a59c3b 100644 --- a/pkg/security/secl/compiler/eval/context.go +++ b/pkg/security/secl/compiler/eval/context.go @@ -34,15 +34,11 @@ type Context struct { // rule register Registers map[RegisterID]int - now time.Time - IteratorCountCache map[string]int + // internal + now time.Time resolvedFields []string - - IteratorCounters map[Field]int - - Error error } // Now return and cache the `now` timestamp @@ -62,7 +58,6 @@ func (c *Context) SetEvent(evt Event) { func (c *Context) Reset() { c.Event = nil c.now = time.Time{} - c.Error = nil clear(c.StringCache) clear(c.IPNetCache) diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go index 1ae4c89db74a49..153f362a2a86f6 100644 --- a/pkg/security/secl/model/accessors_unix.go +++ b/pkg/security/secl/model/accessors_unix.go @@ -1602,7 +1602,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.CTime) @@ -1616,7 +1615,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.FileEvent) @@ -1630,7 +1628,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.GID) @@ -1644,7 +1641,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.FileEvent.FileFields) @@ -1658,7 +1654,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.FileEvent) @@ -1672,7 +1667,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.FileEvent.FileFields) @@ -1686,7 +1680,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.PathKey.Inode) @@ -1700,7 +1693,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.Mode) @@ -1714,7 +1706,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.MTime) @@ -1728,7 +1719,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.PathKey.MountID) @@ -1743,7 +1733,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent) @@ -1768,7 +1757,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.FileEvent) @@ -1782,7 +1770,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.FileEvent) @@ -1796,7 +1783,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.FileEvent) @@ -1811,7 +1797,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent) @@ -1836,7 +1821,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.FileEvent.FileFields)) @@ -1850,7 +1834,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.UID) @@ -1864,7 +1847,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.FileEvent.FileFields) @@ -1938,7 +1920,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -1952,7 +1933,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -1966,7 +1946,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -1980,7 +1959,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) @@ -1994,7 +1972,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2008,7 +1985,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) @@ -2022,7 +1998,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) 
@@ -2036,7 +2011,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -2050,7 +2024,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -2064,7 +2037,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -2079,7 +2051,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2104,7 +2075,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2118,7 +2088,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, 
&ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2132,7 +2101,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2147,7 +2115,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2172,7 +2139,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields)) @@ -2186,7 +2152,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -2200,7 +2165,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) @@ -2604,7 +2568,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.CTime) @@ -2618,7 +2581,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.FileEvent) @@ -2632,7 +2594,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.GID) @@ -2646,7 +2607,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.FileEvent.FileFields) @@ -2660,7 +2620,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.FileEvent) @@ -2674,7 +2633,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.FileEvent.FileFields) @@ -2688,7 +2646,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - 
ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.PathKey.Inode) @@ -2702,7 +2659,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.Mode) @@ -2716,7 +2672,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.MTime) @@ -2730,7 +2685,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.PathKey.MountID) @@ -2745,7 +2699,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent) @@ -2770,7 +2723,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.FileEvent) @@ -2784,7 +2736,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return 
ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.FileEvent) @@ -2798,7 +2749,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.FileEvent) @@ -2813,7 +2763,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent) @@ -2838,7 +2787,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.FileEvent.FileFields)) @@ -2852,7 +2800,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.UID) @@ -2866,7 +2813,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.FileEvent.FileFields) @@ -2940,7 +2886,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 
} return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -2954,7 +2899,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -2968,7 +2912,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -2982,7 +2925,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) @@ -2996,7 +2938,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3010,7 +2951,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) @@ -3024,7 +2964,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -3038,7 +2977,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -3052,7 +2990,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -3066,7 +3003,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -3081,7 +3017,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3106,7 +3041,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3120,7 +3054,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3134,7 +3067,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3149,7 +3081,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3174,7 +3105,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields)) @@ -3188,7 +3118,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -3202,7 +3131,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) @@ -4834,7 +4762,8 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.CIDRArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []net.IPNet { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -4860,7 +4789,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &FlowsIterator{} + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -4885,7 +4814,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -4910,7 +4840,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -4935,7 +4866,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, 
ctx.Registers[regID]) if value == nil { @@ -4960,7 +4892,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -4985,7 +4918,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -5010,7 +4944,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -5035,7 +4970,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -5070,7 +5006,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.CIDRArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []net.IPNet { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator 
:= &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -5096,7 +5033,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &FlowsIterator{} + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -5121,7 +5058,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &FlowsIterator{} + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{Root: ev.NetworkFlowMonitor.Flows} if regID != "" { value := iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { @@ -5632,7 +5570,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5657,7 +5595,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5682,7 +5620,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - 
iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5707,7 +5645,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5732,7 +5670,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5757,7 +5695,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5781,7 +5719,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5805,7 +5744,8 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5829,7 +5769,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5853,7 +5794,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5877,7 +5819,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5902,7 +5845,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := 
&ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5927,7 +5870,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5952,7 +5895,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -5976,7 +5919,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6001,7 +5945,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6026,7 +5970,7 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6050,7 +5994,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6074,7 +6019,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6099,7 +6045,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6124,7 +6070,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: 
ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6149,7 +6095,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6173,7 +6119,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6197,7 +6144,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6221,14 +6169,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error 
= &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -6239,7 +6187,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -6254,14 +6201,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) @@ -6272,7 +6218,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6286,14 +6231,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: 
ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) @@ -6304,7 +6249,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.GID) @@ -6319,14 +6263,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -6337,7 +6280,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -6352,14 +6294,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx 
*eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) @@ -6370,7 +6311,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return nil } return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6385,14 +6325,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -6403,7 +6342,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} 
return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -6417,14 +6355,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -6435,7 +6373,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -6449,14 +6386,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -6467,7 +6404,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := 
newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -6481,14 +6417,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -6499,7 +6435,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -6513,14 +6448,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := 
int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -6531,7 +6466,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -6547,14 +6481,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) @@ -6565,7 +6498,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6581,7 +6513,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if 
element == nil { @@ -6606,14 +6538,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) @@ -6624,7 +6555,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6639,14 +6569,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) @@ -6657,7 +6586,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) 
string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6672,14 +6600,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) @@ -6690,7 +6617,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6706,14 +6632,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) @@ -6724,7 +6649,6 @@ func (_ 
*Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) @@ -6740,7 +6664,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6765,14 +6689,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) @@ -6783,7 +6706,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.FileEvent.FileFields)) @@ -6797,14 +6719,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) @@ -6815,7 +6737,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.UID) @@ -6830,14 +6751,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -6848,7 +6768,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: 
field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -6862,7 +6781,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6886,7 +6806,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6910,7 +6831,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6934,7 +6856,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6958,7 +6881,8 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -6982,7 +6906,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7006,14 +6931,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -7024,7 +6949,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -7039,14 
+6963,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7057,7 +6980,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7071,14 +6993,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -7089,7 +7011,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current 
*ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -7104,14 +7025,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -7122,7 +7042,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -7137,14 +7056,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, 
&element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7155,7 +7073,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIteratorArray(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return nil } return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7170,14 +7087,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -7188,7 +7104,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -7202,14 +7117,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := 
&ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -7220,7 +7135,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -7234,14 +7148,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -7252,7 +7166,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -7266,14 +7179,14 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -7284,7 +7197,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -7298,14 +7210,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -7316,7 +7228,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if 
!current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -7332,14 +7243,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7350,7 +7260,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7366,7 +7275,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7391,14 +7300,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + 
iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7409,7 +7317,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7424,14 +7331,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7442,7 +7348,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, 
¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7457,14 +7362,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7475,7 +7379,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7491,14 +7394,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7509,7 +7411,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, 
"BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -7525,7 +7426,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7550,14 +7451,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -7568,7 +7468,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -7582,14 +7481,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ 
EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -7600,7 +7499,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -7615,14 +7513,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -7633,7 +7530,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "BaseEvent.ProcessContext.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 
"" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -7647,7 +7543,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7671,7 +7568,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7696,7 +7594,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7730,7 +7628,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7754,7 +7653,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7778,7 +7678,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7802,7 +7703,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7826,7 +7728,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7850,7 +7753,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := 
ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7875,7 +7779,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7900,7 +7804,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -7925,7 +7829,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -8191,7 +8095,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -8205,7 +8108,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8219,7 +8121,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.GID) @@ -8233,7 +8134,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) @@ -8247,7 +8147,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8261,7 +8160,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) @@ -8275,7 +8173,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: 
field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -8289,7 +8186,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -8303,7 +8199,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -8317,7 +8212,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -8332,7 +8226,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8357,7 +8250,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8371,7 +8263,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8385,7 +8276,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8400,7 +8290,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8425,7 +8314,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields)) @@ -8439,7 +8327,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.UID) @@ -8453,7 +8340,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() 
{ - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) @@ -8527,7 +8413,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -8541,7 +8426,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8555,7 +8439,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -8569,7 +8452,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -8583,7 +8465,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return 
ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8597,7 +8478,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -8611,7 +8491,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -8625,7 +8504,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -8639,7 +8517,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -8653,7 +8530,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ 
-8668,7 +8544,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8693,7 +8568,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8707,7 +8581,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8721,7 +8594,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8736,7 +8608,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8761,7 +8632,6 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -8775,7 +8645,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -8789,7 +8658,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -8833,7 +8701,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8847,7 +8714,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8861,7 +8727,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8875,7 +8740,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8889,7 +8753,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgv(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8903,7 +8766,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8917,7 +8779,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.AUID) @@ -8931,7 +8792,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.CapEffective) @@ -8945,7 +8805,6 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.CapPermitted) @@ -8959,7 +8818,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.Inode) @@ -8973,7 +8831,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.MountID) @@ -8987,7 +8844,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupID(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) @@ -9001,7 +8857,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) @@ -9015,7 +8870,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return ev.FieldHandlers.ResolveCGroupVersion(ev, 
&ev.BaseEvent.ProcessContext.Parent.CGroup) @@ -9029,7 +8883,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Comm @@ -9043,7 +8896,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.BaseEvent.ProcessContext.Parent) @@ -9057,7 +8909,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)) @@ -9071,7 +8922,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.EGID) @@ -9085,7 +8935,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.EGroup @@ -9099,7 +8948,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 
[]string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) @@ -9113,7 +8961,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -9127,7 +8974,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) @@ -9141,7 +8987,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.EUID) @@ -9155,7 +9000,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.EUser @@ -9169,11 +9013,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.CTime) @@ -9187,11 +9029,9 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9205,11 +9045,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.GID) @@ -9223,11 +9061,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) @@ -9241,11 +9077,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9259,11 +9093,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) @@ -9277,11 +9109,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.Inode) @@ -9295,11 +9125,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode) @@ -9313,11 +9141,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.MTime) @@ -9331,11 +9157,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - 
ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.MountID) @@ -9350,11 +9174,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9379,11 +9201,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9397,11 +9217,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9415,11 +9233,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if 
!ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9434,11 +9250,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9463,11 +9277,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields)) @@ -9481,11 +9293,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.UID) @@ -9499,11 +9309,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) @@ -9517,7 +9325,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.FSGID) @@ -9531,7 +9338,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.FSGroup @@ -9545,7 +9351,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.FSUID) @@ -9559,7 +9364,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.FSUser @@ -9573,7 +9377,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.GID) @@ -9587,7 +9390,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.Group @@ -9601,11 +9403,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.CTime) @@ -9619,11 +9419,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9637,11 +9435,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.GID) @@ -9655,11 +9451,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return 
ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) @@ -9673,11 +9467,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9691,11 +9483,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) @@ -9709,11 +9499,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -9727,11 +9515,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: 
field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode) @@ -9745,11 +9531,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.MTime) @@ -9763,11 +9547,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -9782,11 +9564,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9811,11 +9591,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, 
&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9829,11 +9607,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9847,11 +9623,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9866,11 +9640,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9895,11 +9667,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, 
&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields)) @@ -9913,11 +9683,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.UID) @@ -9931,11 +9699,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) @@ -9949,7 +9715,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.BaseEvent.ProcessContext.Parent.IsExec @@ -9963,7 +9728,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.BaseEvent.ProcessContext.Parent.PIDContext.IsKworker @@ -9977,7 +9741,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return 
ev.FieldHandlers.ResolveProcessIsThread(ev, ev.BaseEvent.ProcessContext.Parent) @@ -9991,7 +9754,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid) @@ -10005,7 +9767,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PPid) @@ -10019,7 +9780,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Tid) @@ -10033,7 +9793,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.TTYName @@ -10047,7 +9806,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.UID) @@ -10061,7 +9819,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return 
ev.BaseEvent.ProcessContext.Parent.Credentials.User @@ -10075,7 +9832,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) @@ -10089,7 +9845,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUID(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) @@ -10103,7 +9858,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) @@ -10226,7 +9980,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10251,7 +10005,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10276,7 +10030,7 @@ 
func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10301,7 +10055,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10326,7 +10080,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10351,7 +10105,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10375,7 +10129,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { 
element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10399,7 +10154,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10423,7 +10179,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10447,7 +10204,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10471,7 +10229,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10496,7 +10255,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) 
[]string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10521,7 +10280,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10546,7 +10305,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10570,7 +10329,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10595,7 +10355,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10620,7 +10380,7 @@ func (_ 
*Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10644,7 +10404,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10668,7 +10429,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10693,7 +10455,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10718,7 +10480,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: 
ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10743,7 +10505,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10767,7 +10529,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10791,7 +10554,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -10815,14 +10579,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 
[]int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -10833,7 +10597,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -10848,14 +10611,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) @@ -10866,7 +10628,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.FileEvent) @@ -10880,14 +10641,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) 
if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) @@ -10898,7 +10659,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.GID) @@ -10913,14 +10673,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -10931,7 +10690,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -10946,14 +10704,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := 
&ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) @@ -10964,7 +10721,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return nil } return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.FileEvent) @@ -10979,14 +10735,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -10997,7 +10752,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -11011,14 +10765,14 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -11029,7 +10783,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -11043,14 +10796,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -11061,7 +10814,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -11075,14 +10827,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -11093,7 +10845,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -11107,14 +10858,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -11125,7 +10876,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, 
"PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -11141,14 +10891,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) @@ -11159,7 +10908,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) @@ -11175,7 +10923,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11200,14 +10948,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := 
&ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) @@ -11218,7 +10965,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.FileEvent) @@ -11233,14 +10979,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) @@ -11251,7 +10996,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, ¤t.ProcessContext.Process.FileEvent) @@ -11266,14 +11010,13 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) @@ -11284,7 +11027,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.FileEvent) @@ -11300,14 +11042,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) @@ -11318,7 +11059,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) @@ -11334,7 +11074,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11359,14 +11099,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) @@ -11377,7 +11116,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.FileEvent.FileFields)) @@ -11391,14 +11129,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { 
element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) @@ -11409,7 +11147,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.UID) @@ -11424,14 +11161,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -11442,7 +11178,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -11456,7 +11191,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - 
iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11480,7 +11216,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11504,7 +11241,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11528,7 +11266,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11552,7 +11291,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, 
ctx.Registers[regID]) if element == nil { @@ -11576,7 +11316,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11600,14 +11341,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -11618,7 +11359,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -11633,14 +11373,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { 
return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -11651,7 +11390,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -11665,14 +11403,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -11683,7 +11421,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -11698,14 +11435,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := 
&ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -11716,7 +11452,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -11731,14 +11466,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -11749,7 +11483,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIteratorArray(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return nil } return ev.FieldHandlers.ResolveHashesFromEvent(ev, 
¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -11764,14 +11497,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -11782,7 +11514,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -11796,14 +11527,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -11814,7 +11545,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := 
newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -11828,14 +11558,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -11846,7 +11576,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -11860,14 +11589,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := 
int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -11878,7 +11607,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -11892,14 +11620,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -11910,7 +11638,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -11926,14 +11653,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, 
ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -11944,7 +11670,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -11960,7 +11685,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -11985,14 +11710,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12003,7 +11727,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current 
*ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12018,14 +11741,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12036,7 +11758,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12051,14 +11772,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, 
&element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12069,7 +11789,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12085,14 +11804,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12103,7 +11821,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -12119,7 +11836,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == 
nil { @@ -12144,14 +11861,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -12162,7 +11878,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -12176,14 +11891,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -12194,7 +11909,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, nil, func(ev *Event, current 
*ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -12209,14 +11923,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -12227,7 +11940,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "PTrace.Tracee.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -12241,7 +11953,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12265,7 +11978,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { 
ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12290,7 +12004,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12324,7 +12038,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12348,7 +12063,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12372,7 +12088,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if 
element == nil { @@ -12396,7 +12113,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12420,7 +12138,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12444,7 +12163,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12469,7 +12189,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12494,7 +12214,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := 
&ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12519,7 +12239,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.PTrace.Tracee.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -12785,7 +12505,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.CTime) @@ -12799,7 +12518,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12813,7 +12531,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.GID) @@ -12827,7 +12544,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ 
-12841,7 +12557,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12855,7 +12570,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ -12869,7 +12583,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.Inode) @@ -12883,7 +12596,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode) @@ -12897,7 +12609,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.MTime) @@ -12911,7 +12622,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} 
return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.MountID) @@ -12926,7 +12636,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12951,7 +12660,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12965,7 +12673,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12979,7 +12686,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12994,7 +12700,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -13019,7 +12724,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev 
:= ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields)) @@ -13033,7 +12737,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.UID) @@ -13047,7 +12750,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ -13121,7 +12823,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -13135,7 +12836,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13149,7 +12849,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -13163,7 
+12862,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) @@ -13177,7 +12875,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13191,7 +12888,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) @@ -13205,7 +12901,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -13219,7 +12914,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -13233,7 +12927,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -13247,7 +12940,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -13262,7 +12954,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13287,7 +12978,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13301,7 +12991,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13315,7 +13004,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, 
&ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13330,7 +13018,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13355,7 +13042,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields)) @@ -13369,7 +13055,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -13383,7 +13068,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) @@ -13427,7 +13111,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) @@ -13441,7 +13124,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.PTrace.Tracee.Parent) @@ -13455,7 +13137,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.PTrace.Tracee.Parent) @@ -13469,7 +13150,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.PTrace.Tracee.Parent) @@ -13483,7 +13163,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgv(ev, ev.PTrace.Tracee.Parent) @@ -13497,7 +13176,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.PTrace.Tracee.Parent) @@ -13511,7 +13189,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.AUID) @@ -13525,7 +13202,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.CapEffective) @@ -13539,7 +13215,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.CapPermitted) @@ -13553,7 +13228,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.CGroup.CGroupFile.Inode) @@ -13567,7 +13241,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.CGroup.CGroupFile.MountID) @@ -13581,7 +13254,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupID(ev, &ev.PTrace.Tracee.Parent.CGroup) @@ -13595,7 +13267,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.PTrace.Tracee.Parent.CGroup) @@ -13609,7 +13280,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.PTrace.Tracee.Parent.CGroup) @@ -13623,7 +13293,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Comm @@ -13637,7 +13306,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.PTrace.Tracee.Parent) @@ -13651,7 +13319,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.PTrace.Tracee.Parent)) @@ -13665,7 +13332,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.EGID) @@ -13679,7 +13345,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.EGroup @@ -13693,7 +13358,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.PTrace.Tracee.Parent) @@ -13707,7 +13371,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.PTrace.Tracee.Parent) @@ -13721,7 +13384,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.PTrace.Tracee.Parent) @@ -13735,7 +13397,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.EUID) @@ -13749,7 +13410,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.EUser @@ -13763,11 +13423,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.CTime) @@ -13781,11 +13439,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13799,11 +13455,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.GID) @@ -13817,11 +13471,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) @@ -13835,11 +13487,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13853,11 +13503,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) @@ -13871,11 +13519,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.Inode) @@ -13889,11 +13535,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode) @@ -13907,11 +13551,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.MTime) @@ -13925,11 +13567,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.MountID) @@ -13944,11 +13584,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13973,11 +13611,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13991,11 +13627,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -14009,11 +13643,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -14028,11 +13660,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if 
!ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -14057,11 +13687,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields)) @@ -14075,11 +13703,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.UID) @@ -14093,11 +13719,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) @@ -14111,7 +13735,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.FSGID) @@ -14125,7 +13748,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.FSGroup @@ -14139,7 +13761,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.FSUID) @@ -14153,7 +13774,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.FSUser @@ -14167,7 +13787,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.GID) @@ -14181,7 +13800,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.Group @@ -14195,11 +13813,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.CTime) @@ -14213,11 +13829,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14231,11 +13845,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.GID) @@ -14249,11 +13861,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) @@ -14267,11 +13877,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14285,11 +13893,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } if 
!ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) @@ -14303,11 +13909,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -14321,11 +13925,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode) @@ -14339,11 +13941,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.MTime) @@ -14357,11 +13957,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return 
int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -14376,11 +13974,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14405,11 +14001,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14423,11 +14017,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14441,11 +14033,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14460,11 +14050,9 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14489,11 +14077,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields)) @@ -14507,11 +14093,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.UID) @@ -14525,11 +14109,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) @@ -14543,7 +14125,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.PTrace.Tracee.Parent.IsExec @@ -14557,7 +14138,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.PTrace.Tracee.Parent.PIDContext.IsKworker @@ -14571,7 +14151,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.PTrace.Tracee.Parent) @@ -14585,7 +14164,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.PIDContext.Pid) @@ -14599,7 +14177,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.PPid) @@ -14613,7 +14190,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.PIDContext.Tid) @@ -14627,7 +14203,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.TTYName @@ 
-14641,7 +14216,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.UID) @@ -14655,7 +14229,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.User @@ -14669,7 +14242,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Parent.UserSession) @@ -14683,7 +14255,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Parent.UserSession) @@ -14697,7 +14268,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.PTrace.Tracee.Parent.UserSession) @@ -16110,7 +15680,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := 
iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16135,7 +15705,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16160,7 +15730,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16185,7 +15755,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16210,7 +15780,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16235,7 +15805,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := 
&ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16259,7 +15829,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16283,7 +15854,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16307,7 +15879,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16331,7 +15904,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16355,7 +15929,8 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16380,7 +15955,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16405,7 +15980,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16430,7 +16005,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16454,7 +16029,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { 
element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16479,7 +16055,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16504,7 +16080,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16528,7 +16104,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16552,7 +16129,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16577,7 +16155,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16602,7 +16180,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16627,7 +16205,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16651,7 +16229,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16675,7 +16254,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -16699,14 +16279,14 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -16717,7 +16297,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -16732,14 +16311,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) @@ -16750,7 +16328,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} 
return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.FileEvent) @@ -16764,14 +16341,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) @@ -16782,7 +16359,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.GID) @@ -16797,14 +16373,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -16815,7 +16390,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, 
func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -16830,14 +16404,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) @@ -16848,7 +16421,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return nil } return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.FileEvent) @@ -16863,14 +16435,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, 
&element.ProcessContext.Process.FileEvent.FileFields) @@ -16881,7 +16452,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -16895,14 +16465,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -16913,7 +16483,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -16927,14 +16496,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, 
ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -16945,7 +16514,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -16959,14 +16527,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -16977,7 +16545,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -16991,14 +16558,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + 
iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -17009,7 +16576,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -17025,14 +16591,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) @@ -17043,7 +16608,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) @@ -17059,7 +16623,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx 
*eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17084,14 +16648,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) @@ -17102,7 +16665,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.FileEvent) @@ -17117,14 +16679,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, 
&element.ProcessContext.Process.FileEvent) @@ -17135,7 +16696,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, ¤t.ProcessContext.Process.FileEvent) @@ -17150,14 +16710,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) @@ -17168,7 +16727,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.FileEvent) @@ -17184,14 +16742,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if 
!element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) @@ -17202,7 +16759,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) @@ -17218,7 +16774,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17243,14 +16799,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) @@ -17261,7 +16816,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.FileEvent.FileFields)) @@ -17275,14 +16829,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) @@ -17293,7 +16847,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.FileEvent.FileFields.UID) @@ -17308,14 +16861,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) @@ -17326,7 +16878,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, 
"Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) @@ -17340,7 +16891,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17364,7 +16916,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17388,7 +16941,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17412,7 +16966,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} 
if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17436,7 +16991,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17460,7 +17016,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17484,14 +17041,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -17502,7 +17059,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } 
return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -17517,14 +17073,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17535,7 +17090,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17549,14 +17103,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -17567,7 +17121,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, 
"Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -17582,14 +17135,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -17600,7 +17152,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -17615,14 +17166,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := 
ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17633,7 +17183,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIteratorArray(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return nil } return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17648,14 +17197,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -17666,7 +17214,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -17680,14 +17227,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := 
&ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -17698,7 +17245,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -17712,14 +17258,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -17730,7 +17276,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -17744,14 +17289,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -17762,7 +17307,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -17776,14 +17320,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -17794,7 +17338,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: 
field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -17810,14 +17353,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17828,7 +17370,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17844,7 +17385,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -17869,14 +17410,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, 
ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17887,7 +17427,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17902,14 +17441,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17920,7 +17458,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17935,14 +17472,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17953,7 +17489,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17969,14 +17504,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -17987,7 +17521,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, 
¤t.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -18003,7 +17536,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18028,14 +17561,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -18046,7 +17578,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -18060,14 +17591,14 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) 
if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -18078,7 +17609,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -18093,14 +17623,13 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { return nil } if !element.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -18111,7 +17640,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } results := newIterator(iterator, "Signal.Target.Ancestor", ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { if !current.ProcessContext.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -18125,7 +17653,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - 
iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18149,7 +17678,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18174,7 +17704,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18208,7 +17738,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18232,7 +17763,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18256,7 
+17788,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18280,7 +17813,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18304,7 +17838,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18328,7 +17863,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18353,7 +17889,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := 
&ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18378,7 +17914,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18403,7 +17939,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.Signal.Target.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -18669,7 +18205,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.CTime) @@ -18683,7 +18218,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.FileEvent) @@ -18697,7 +18231,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return 
int(ev.Signal.Target.Process.FileEvent.FileFields.GID) @@ -18711,7 +18244,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.FileEvent.FileFields) @@ -18725,7 +18257,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.FileEvent) @@ -18739,7 +18270,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.FileEvent.FileFields) @@ -18753,7 +18283,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.PathKey.Inode) @@ -18767,7 +18296,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.Mode) @@ -18781,7 +18309,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) 
if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.MTime) @@ -18795,7 +18322,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.PathKey.MountID) @@ -18810,7 +18336,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.FileEvent) @@ -18835,7 +18360,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.FileEvent) @@ -18849,7 +18373,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.FileEvent) @@ -18863,7 +18386,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.FileEvent) @@ -18878,7 +18400,6 @@ func (_ *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent) @@ -18903,7 +18424,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.FileEvent.FileFields)) @@ -18917,7 +18437,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.UID) @@ -18931,7 +18450,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.FileEvent.FileFields) @@ -19005,7 +18523,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -19019,7 +18536,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return 
ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19033,7 +18549,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -19047,7 +18562,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) @@ -19061,7 +18575,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19075,7 +18588,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) @@ -19089,7 +18601,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -19103,7 +18614,6 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -19117,7 +18627,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -19131,7 +18640,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -19146,7 +18654,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19171,7 +18678,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19185,7 +18691,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: 
field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19199,7 +18704,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19214,7 +18718,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -19239,7 +18742,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields)) @@ -19253,7 +18755,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -19267,7 +18768,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) @@ -19311,7 +18811,6 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) @@ -19325,7 +18824,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Signal.Target.Parent) @@ -19339,7 +18837,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Signal.Target.Parent) @@ -19353,7 +18850,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Signal.Target.Parent) @@ -19367,7 +18863,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Signal.Target.Parent) @@ -19381,7 +18876,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Signal.Target.Parent) @@ 
-19395,7 +18889,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.AUID) @@ -19409,7 +18902,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.CapEffective) @@ -19423,7 +18915,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.CapPermitted) @@ -19437,7 +18928,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.CGroup.CGroupFile.Inode) @@ -19451,7 +18941,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.CGroup.CGroupFile.MountID) @@ -19465,7 +18954,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Signal.Target.Parent.CGroup) @@ -19479,7 +18967,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Signal.Target.Parent.CGroup) @@ -19493,7 +18980,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Signal.Target.Parent.CGroup) @@ -19507,7 +18993,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Comm @@ -19521,7 +19006,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Signal.Target.Parent) @@ -19535,7 +19019,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Signal.Target.Parent)) @@ -19549,7 +19032,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.EGID) @@ -19563,7 +19045,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.EGroup @@ -19577,7 +19058,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Signal.Target.Parent) @@ -19591,7 +19071,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Signal.Target.Parent) @@ -19605,7 +19084,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Signal.Target.Parent) @@ -19619,7 +19097,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.EUID) @@ -19633,7 +19110,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.EUser @@ -19647,11 +19123,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) 
if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.CTime) @@ -19665,11 +19139,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19683,11 +19155,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.GID) @@ -19701,11 +19171,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) @@ -19719,11 +19187,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return 
ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19737,11 +19203,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) @@ -19755,11 +19219,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.Inode) @@ -19773,11 +19235,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.Mode) @@ -19791,11 +19251,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.MTime) @@ -19809,11 +19267,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.MountID) @@ -19828,11 +19284,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19857,11 +19311,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19875,11 +19327,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19893,11 +19343,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: 
field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19912,11 +19360,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19941,11 +19387,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.FileEvent.FileFields)) @@ -19959,11 +19403,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.UID) @@ -19977,11 +19419,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) @@ -19995,7 +19435,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.FSGID) @@ -20009,7 +19448,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.FSGroup @@ -20023,7 +19461,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.FSUID) @@ -20037,7 +19474,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.FSUser @@ -20051,7 +19487,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.GID) @@ -20065,7 +19500,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.Group @@ -20079,11 +19513,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.CTime) @@ -20097,11 +19529,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20115,11 +19545,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.GID) @@ -20133,11 +19561,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) @@ -20151,11 +19577,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } 
return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20169,11 +19593,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) @@ -20187,11 +19609,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -20205,11 +19625,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode) @@ -20223,11 +19641,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.MTime) @@ -20241,11 +19657,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -20260,11 +19674,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20289,11 +19701,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20307,11 +19717,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20325,11 +19733,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20344,11 +19750,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -20373,11 +19777,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields)) @@ -20391,11 +19793,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.UID) @@ -20409,11 +19809,9 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } 
return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) @@ -20427,7 +19825,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.Signal.Target.Parent.IsExec @@ -20441,7 +19838,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.Signal.Target.Parent.PIDContext.IsKworker @@ -20455,7 +19851,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Signal.Target.Parent) @@ -20469,7 +19864,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.PIDContext.Pid) @@ -20483,7 +19877,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.PPid) @@ -20497,7 +19890,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.PIDContext.Tid) @@ -20511,7 +19903,6 @@ 
func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.TTYName @@ -20525,7 +19916,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.UID) @@ -20539,7 +19929,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.User @@ -20553,7 +19942,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Parent.UserSession) @@ -20567,7 +19955,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Parent.UserSession) @@ -20581,7 +19968,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Signal.Target.Parent.UserSession) @@ -22882,9 +22268,6 @@ func (ev *Event) GetFieldValue(field eval.Field) 
(interface{}, error) { } ctx := eval.NewContext(ev) value := evaluator.Eval(ctx) - if ctx.Error != nil { - return nil, ctx.Error - } return value, nil } func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { @@ -26111,15 +25494,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chdir.file.path.length": return &eval.ErrFieldReadOnly{Field: "chdir.file.path.length"} case "chdir.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "chdir.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "chdir.file.rights"} - } - ev.Chdir.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "chdir.file.rights"} case "chdir.file.uid": rv, ok := value.(int) if !ok { @@ -26278,15 +25653,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chmod.file.path.length": return &eval.ErrFieldReadOnly{Field: "chmod.file.path.length"} case "chmod.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "chmod.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "chmod.file.rights"} - } - ev.Chmod.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "chmod.file.rights"} case "chmod.file.uid": rv, ok := value.(int) if !ok { @@ -26466,15 +25833,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chown.file.path.length": return &eval.ErrFieldReadOnly{Field: "chown.file.path.length"} case "chown.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "chown.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "chown.file.rights"} - } - ev.Chown.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "chown.file.rights"} case 
"chown.file.uid": rv, ok := value.(int) if !ok { @@ -26716,38 +26075,17 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "exec.args"} - } - ev.Exec.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "exec.args"} case "exec.args_flags": if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } - switch rv := value.(type) { - case string: - ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv) - case []string: - ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "exec.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "exec.args_flags"} case "exec.args_options": if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } - switch rv := value.(type) { - case string: - ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv) - case []string: - ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "exec.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "exec.args_options"} case "exec.args_truncated": if ev.Exec.Process == nil { ev.Exec.Process = &Process{} @@ -27137,15 +26475,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "exec.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "exec.file.rights"} - } - ev.Exec.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "exec.file.rights"} case "exec.file.uid": if ev.Exec.Process == nil { ev.Exec.Process = &Process{} @@ -27230,6 +26560,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.change_time"} @@ -27240,6 +26574,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.filesystem"} @@ -27250,6 +26588,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: 
"exec.interpreter.file.gid"} @@ -27260,6 +26602,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.group"} @@ -27270,6 +26616,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.Exec.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Exec.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -27283,6 +26633,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.in_upper_layer"} @@ -27293,6 +26647,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.inode"} @@ -27303,6 +26661,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return 
&eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.mode"} @@ -27316,6 +26678,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.modification_time"} @@ -27326,6 +26692,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.mount_id"} @@ -27336,6 +26706,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.name"} @@ -27351,6 +26725,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.package.name"} @@ -27361,6 +26739,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, 
ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.package.source_version"} @@ -27371,6 +26753,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.package.version"} @@ -27381,6 +26767,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.path"} @@ -27396,19 +26786,15 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "exec.interpreter.file.rights"} - } - ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "exec.interpreter.file.rights"} case "exec.interpreter.file.uid": if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.uid"} @@ -27419,6 +26805,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exec.Process == nil { ev.Exec.Process = &Process{} } + cont, err := 
SetInterpreterFields(&ev.Exec.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.user"} @@ -27559,38 +26949,17 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "exit.args"} - } - ev.Exit.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "exit.args"} case "exit.args_flags": if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } - switch rv := value.(type) { - case string: - ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv) - case []string: - ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "exit.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "exit.args_flags"} case "exit.args_options": if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } - switch rv := value.(type) { - case string: - ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv) - case []string: - ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "exit.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "exit.args_options"} case "exit.args_truncated": if ev.Exit.Process == nil { ev.Exit.Process = &Process{} @@ -27994,15 +27363,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "exit.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "exit.file.rights"} - } - ev.Exit.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "exit.file.rights"} case "exit.file.uid": if ev.Exit.Process == nil { ev.Exit.Process = &Process{} @@ -28087,6 +27448,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.change_time"} @@ -28097,6 +27462,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.filesystem"} @@ -28107,6 +27476,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: 
"exit.interpreter.file.gid"} @@ -28117,6 +27490,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.group"} @@ -28127,6 +27504,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.Exit.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Exit.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -28140,6 +27521,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.in_upper_layer"} @@ -28150,6 +27535,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.inode"} @@ -28160,6 +27549,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return 
&eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.mode"} @@ -28173,6 +27566,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.modification_time"} @@ -28183,6 +27580,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.mount_id"} @@ -28193,6 +27594,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.name"} @@ -28208,6 +27613,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.package.name"} @@ -28218,6 +27627,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, 
ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.package.source_version"} @@ -28228,6 +27641,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.package.version"} @@ -28238,6 +27655,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.path"} @@ -28253,19 +27674,15 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "exit.interpreter.file.rights"} - } - ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "exit.interpreter.file.rights"} case "exit.interpreter.file.uid": if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.uid"} @@ -28276,6 +27693,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Exit.Process == nil { ev.Exit.Process = &Process{} } + cont, err := 
SetInterpreterFields(&ev.Exit.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.user"} @@ -28584,15 +28005,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "link.file.destination.path.length": return &eval.ErrFieldReadOnly{Field: "link.file.destination.path.length"} case "link.file.destination.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "link.file.destination.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "link.file.destination.rights"} - } - ev.Link.Target.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "link.file.destination.rights"} case "link.file.destination.uid": rv, ok := value.(int) if !ok { @@ -28716,15 +28129,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "link.file.path.length": return &eval.ErrFieldReadOnly{Field: "link.file.path.length"} case "link.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "link.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "link.file.rights"} - } - ev.Link.Source.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "link.file.rights"} case "link.file.uid": rv, ok := value.(int) if !ok { @@ -28900,15 +28305,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "load_module.file.path.length": return &eval.ErrFieldReadOnly{Field: "load_module.file.path.length"} case "load_module.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "load_module.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "load_module.file.rights"} - } - ev.LoadModule.File.FileFields.Mode = uint16(rv) - 
return nil + return &eval.ErrFieldReadOnly{Field: "load_module.file.rights"} case "load_module.file.uid": rv, ok := value.(int) if !ok { @@ -29074,15 +28471,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "mkdir.file.path.length": return &eval.ErrFieldReadOnly{Field: "mkdir.file.path.length"} case "mkdir.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "mkdir.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "mkdir.file.rights"} - } - ev.Mkdir.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "mkdir.file.rights"} case "mkdir.file.uid": rv, ok := value.(int) if !ok { @@ -29234,15 +28623,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "mmap.file.path.length": return &eval.ErrFieldReadOnly{Field: "mmap.file.path.length"} case "mmap.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "mmap.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "mmap.file.rights"} - } - ev.MMap.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "mmap.file.rights"} case "mmap.file.uid": rv, ok := value.(int) if !ok { @@ -29774,15 +29155,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open.file.path.length": return &eval.ErrFieldReadOnly{Field: "open.file.path.length"} case "open.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "open.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "open.file.rights"} - } - ev.Open.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "open.file.rights"} case "open.file.uid": rv, ok := value.(int) if !ok { @@ -29945,12 +29318,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value 
interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args"} - } - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "process.ancestors.args"} case "process.ancestors.args_flags": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -29958,15 +29326,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } - switch rv := value.(type) { - case string: - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv) - case []string: - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "process.ancestors.args_flags"} case "process.ancestors.args_options": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -29974,15 +29334,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } - switch rv := value.(type) { - case string: - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv) - case []string: - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "process.ancestors.args_options"} case "process.ancestors.args_truncated": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -30489,15 +29841,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "process.ancestors.file.rights"} - } - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "process.ancestors.file.rights"} case "process.ancestors.file.uid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -30609,6 +29953,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.change_time"} @@ -30622,6 +29970,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { 
return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.filesystem"} @@ -30635,6 +29987,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.gid"} @@ -30648,6 +30004,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.group"} @@ -30661,6 +30021,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -30677,6 +30041,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := 
SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.in_upper_layer"} @@ -30690,6 +30058,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.inode"} @@ -30703,6 +30075,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.mode"} @@ -30719,6 +30095,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.modification_time"} @@ -30732,6 +30112,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { 
ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.mount_id"} @@ -30745,6 +30129,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.name"} @@ -30766,6 +30154,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.package.name"} @@ -30779,6 +30171,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.package.source_version"} @@ -30792,6 +30188,10 @@ func (ev *Event) SetFieldValue(field eval.Field, 
value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.package.version"} @@ -30805,6 +30205,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.path"} @@ -30826,15 +30230,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "process.ancestors.interpreter.file.rights"} - } - ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "process.ancestors.interpreter.file.rights"} case "process.ancestors.interpreter.file.uid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -30842,6 +30238,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := 
SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.uid"} @@ -30855,6 +30255,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Ancestor == nil { ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.user"} @@ -31032,38 +30436,17 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.args"} - } - ev.BaseEvent.ProcessContext.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "process.args"} case "process.args_flags": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } - switch rv := value.(type) { - case string: - ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv) - case []string: - ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "process.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "process.args_flags"} case "process.args_options": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } - switch rv := value.(type) { - case string: - ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv) - case []string: - ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "process.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "process.args_options"} case "process.args_truncated": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -31453,15 +30836,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "process.file.rights"} - } - ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "process.file.rights"} case "process.file.uid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -31546,6 +30921,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.change_time"} @@ -31556,6 +30935,10 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.filesystem"} @@ -31566,6 +30949,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.gid"} @@ -31576,6 +30963,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.group"} @@ -31586,6 +30977,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -31599,6 +30994,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = 
&ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.in_upper_layer"} @@ -31609,6 +31008,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.inode"} @@ -31619,6 +31022,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.mode"} @@ -31632,6 +31039,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.modification_time"} @@ -31642,6 +31053,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return 
err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.mount_id"} @@ -31652,6 +31067,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.name"} @@ -31667,6 +31086,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.package.name"} @@ -31677,6 +31100,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.package.source_version"} @@ -31687,6 +31114,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.package.version"} @@ -31697,6 
+31128,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.path"} @@ -31712,19 +31147,15 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "process.interpreter.file.rights"} - } - ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "process.interpreter.file.rights"} case "process.interpreter.file.uid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.uid"} @@ -31735,6 +31166,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.user"} @@ -31778,12 +31213,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if 
ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.parent.args"} - } - ev.BaseEvent.ProcessContext.Parent.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "process.parent.args"} case "process.parent.args_flags": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -31791,15 +31221,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } - switch rv := value.(type) { - case string: - ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv) - case []string: - ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "process.parent.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "process.parent.args_flags"} case "process.parent.args_options": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -31807,15 +31229,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } - switch rv := value.(type) { - case string: - ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv) - case []string: - ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "process.parent.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "process.parent.args_options"} case "process.parent.args_truncated": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -32322,15 +31736,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.parent.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "process.parent.file.rights"} - } - ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "process.parent.file.rights"} case "process.parent.file.uid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -32442,6 +31848,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.change_time"} @@ -32455,6 +31865,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.filesystem"} @@ -32468,6 +31882,10 @@ func (ev *Event) 
SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.gid"} @@ -32481,6 +31899,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.group"} @@ -32494,6 +31916,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Hashes, rv) @@ -32510,6 +31936,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.in_upper_layer"} @@ -32523,6 +31953,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if 
ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.inode"} @@ -32536,6 +31970,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.mode"} @@ -32552,6 +31990,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.modification_time"} @@ -32565,6 +32007,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.mount_id"} @@ -32578,6 +32024,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := 
SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.name"} @@ -32599,6 +32049,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.package.name"} @@ -32612,6 +32066,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.package.source_version"} @@ -32625,6 +32083,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.package.version"} @@ -32638,6 +32100,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, 
"file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.path"} @@ -32659,15 +32125,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "process.parent.interpreter.file.rights"} - } - ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "process.parent.interpreter.file.rights"} case "process.parent.interpreter.file.uid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -32675,6 +32133,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.uid"} @@ -32688,6 +32150,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.BaseEvent.ProcessContext.Parent == nil { ev.BaseEvent.ProcessContext.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.user"} @@ -32967,12 +32433,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { 
ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args"} - } - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.ancestors.args"} case "ptrace.tracee.ancestors.args_flags": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -32980,15 +32441,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } - switch rv := value.(type) { - case string: - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv) - case []string: - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.ancestors.args_flags"} case "ptrace.tracee.ancestors.args_options": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -32996,15 +32449,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } - switch rv := value.(type) { - case string: - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv) - case []string: - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.ancestors.args_options"} case "ptrace.tracee.ancestors.args_truncated": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -33511,15 +32956,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.ancestors.file.rights"} - } - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.ancestors.file.rights"} case "ptrace.tracee.ancestors.file.uid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -33631,6 +33068,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.change_time"} @@ -33644,6 +33085,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.filesystem"} 
@@ -33657,6 +33102,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.gid"} @@ -33670,6 +33119,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.group"} @@ -33683,6 +33136,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -33699,6 +33156,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: 
"ptrace.tracee.ancestors.interpreter.file.in_upper_layer"} @@ -33712,6 +33173,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.inode"} @@ -33725,6 +33190,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.mode"} @@ -33741,6 +33210,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.modification_time"} @@ -33754,6 +33227,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.mount_id"} @@ 
-33767,6 +33244,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.name"} @@ -33788,6 +33269,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.package.name"} @@ -33801,6 +33286,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.package.source_version"} @@ -33814,6 +33303,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.package.version"} @@ -33827,6 
+33320,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.path"} @@ -33848,15 +33345,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.ancestors.interpreter.file.rights"} - } - ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.ancestors.interpreter.file.rights"} case "ptrace.tracee.ancestors.interpreter.file.uid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -33864,6 +33353,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.uid"} @@ -33877,6 +33370,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Ancestor == nil { ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} } + cont, err := 
SetInterpreterFields(&ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.user"} @@ -34054,38 +33551,17 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args"} - } - ev.PTrace.Tracee.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.args"} case "ptrace.tracee.args_flags": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } - switch rv := value.(type) { - case string: - ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv) - case []string: - ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.args_flags"} case "ptrace.tracee.args_options": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } - switch rv := value.(type) { - case string: - ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv) - case []string: - ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.args_options"} case "ptrace.tracee.args_truncated": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -34475,15 +33951,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.file.rights"} - } - ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.file.rights"} case "ptrace.tracee.file.uid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -34568,6 +34036,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.change_time"} @@ -34578,6 +34050,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.filesystem"} @@ -34588,6 +34064,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := 
SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.gid"} @@ -34598,6 +34078,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.group"} @@ -34608,6 +34092,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.Hashes = append(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -34621,6 +34109,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.in_upper_layer"} @@ -34631,6 +34123,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.inode"} @@ 
-34641,6 +34137,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.mode"} @@ -34654,6 +34154,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.modification_time"} @@ -34664,6 +34168,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.mount_id"} @@ -34674,6 +34182,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.name"} @@ -34689,6 +34201,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.package.name", value) + if err != nil 
|| !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.package.name"} @@ -34699,6 +34215,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.package.source_version"} @@ -34709,6 +34229,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.package.version"} @@ -34719,6 +34243,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.path"} @@ -34734,19 +34262,15 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.interpreter.file.rights"} - } - ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return 
&eval.ErrFieldReadOnly{Field: "ptrace.tracee.interpreter.file.rights"} case "ptrace.tracee.interpreter.file.uid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.uid"} @@ -34757,6 +34281,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.user"} @@ -34800,12 +34328,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args"} - } - ev.PTrace.Tracee.Parent.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.parent.args"} case "ptrace.tracee.parent.args_flags": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -34813,15 +34336,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } - switch rv := value.(type) { - case string: - ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv) - case []string: - ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.parent.args_flags"} case "ptrace.tracee.parent.args_options": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -34829,15 +34344,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } - switch rv := value.(type) { - case string: - ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv) - case []string: - ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.parent.args_options"} case "ptrace.tracee.parent.args_truncated": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -35344,15 +34851,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.parent.file.rights"} - } - ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.parent.file.rights"} case "ptrace.tracee.parent.file.uid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -35464,6 +34963,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return 
&eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.change_time"} @@ -35477,6 +34980,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.filesystem"} @@ -35490,6 +34997,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.gid"} @@ -35503,6 +35014,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.group"} @@ -35516,6 +35031,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Hashes = append(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Hashes, rv) @@ -35532,6 +35051,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if 
ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.in_upper_layer"} @@ -35545,6 +35068,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.inode"} @@ -35558,6 +35085,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.mode"} @@ -35574,6 +35105,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.modification_time"} @@ -35587,6 +35122,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok 
:= value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.mount_id"} @@ -35600,6 +35139,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.name"} @@ -35621,6 +35164,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.package.name"} @@ -35634,6 +35181,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.package.source_version"} @@ -35647,6 +35198,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.package.version"} @@ -35660,6 +35215,10 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.path"} @@ -35681,15 +35240,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.parent.interpreter.file.rights"} - } - ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.parent.interpreter.file.rights"} case "ptrace.tracee.parent.interpreter.file.uid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -35697,6 +35248,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.uid"} @@ -35710,6 +35265,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.PTrace.Tracee.Parent == nil { ev.PTrace.Tracee.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.PTrace.Tracee.Parent.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.user"} @@ -36098,15 +35657,7 @@ func 
(ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "removexattr.file.path.length": return &eval.ErrFieldReadOnly{Field: "removexattr.file.path.length"} case "removexattr.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "removexattr.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "removexattr.file.rights"} - } - ev.RemoveXAttr.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "removexattr.file.rights"} case "removexattr.file.uid": rv, ok := value.(int) if !ok { @@ -36251,15 +35802,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.path.length": return &eval.ErrFieldReadOnly{Field: "rename.file.destination.path.length"} case "rename.file.destination.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "rename.file.destination.rights"} - } - ev.Rename.New.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "rename.file.destination.rights"} case "rename.file.destination.uid": rv, ok := value.(int) if !ok { @@ -36383,15 +35926,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.path.length": return &eval.ErrFieldReadOnly{Field: "rename.file.path.length"} case "rename.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "rename.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "rename.file.rights"} - } - ev.Rename.Old.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "rename.file.rights"} case "rename.file.uid": rv, ok := value.(int) if !ok { @@ -36543,15 +36078,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value 
interface{}) error { case "rmdir.file.path.length": return &eval.ErrFieldReadOnly{Field: "rmdir.file.path.length"} case "rmdir.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "rmdir.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "rmdir.file.rights"} - } - ev.Rmdir.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "rmdir.file.rights"} case "rmdir.file.uid": rv, ok := value.(int) if !ok { @@ -36822,15 +36349,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "setxattr.file.path.length": return &eval.ErrFieldReadOnly{Field: "setxattr.file.path.length"} case "setxattr.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "setxattr.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "setxattr.file.rights"} - } - ev.SetXAttr.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "setxattr.file.rights"} case "setxattr.file.uid": rv, ok := value.(int) if !ok { @@ -36873,12 +36392,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args"} - } - ev.Signal.Target.Ancestor.ProcessContext.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.ancestors.args"} case "signal.target.ancestors.args_flags": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -36886,15 +36400,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } - switch rv := value.(type) { - case string: - ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = 
append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv) - case []string: - ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.ancestors.args_flags"} case "signal.target.ancestors.args_options": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -36902,15 +36408,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } - switch rv := value.(type) { - case string: - ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv) - case []string: - ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.ancestors.args_options"} case "signal.target.ancestors.args_truncated": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -37417,15 +36915,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "signal.target.ancestors.file.rights"} - } - ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.ancestors.file.rights"} case "signal.target.ancestors.file.uid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -37537,6 +37027,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.change_time"} @@ -37550,6 +37044,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.filesystem"} 
@@ -37563,6 +37061,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.gid"} @@ -37576,6 +37078,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.group"} @@ -37589,6 +37095,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -37605,6 +37115,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: 
"signal.target.ancestors.interpreter.file.in_upper_layer"} @@ -37618,6 +37132,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.inode"} @@ -37631,6 +37149,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.mode"} @@ -37647,6 +37169,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.modification_time"} @@ -37660,6 +37186,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.mount_id"} @@ 
-37673,6 +37203,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.name"} @@ -37694,6 +37228,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.package.name"} @@ -37707,6 +37245,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.package.source_version"} @@ -37720,6 +37262,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.package.version"} @@ -37733,6 
+37279,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.path"} @@ -37754,15 +37304,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "signal.target.ancestors.interpreter.file.rights"} - } - ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.ancestors.interpreter.file.rights"} case "signal.target.ancestors.interpreter.file.uid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -37770,6 +37312,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.uid"} @@ -37783,6 +37329,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Ancestor == nil { ev.Signal.Target.Ancestor = &ProcessCacheEntry{} } + cont, err := 
SetInterpreterFields(&ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.user"} @@ -37960,38 +37510,17 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.args"} - } - ev.Signal.Target.Process.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.args"} case "signal.target.args_flags": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } - switch rv := value.(type) { - case string: - ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv) - case []string: - ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "signal.target.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.args_flags"} case "signal.target.args_options": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } - switch rv := value.(type) { - case string: - ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv) - case []string: - ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "signal.target.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.args_options"} case "signal.target.args_truncated": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -38381,15 +37910,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "signal.target.file.rights"} - } - ev.Signal.Target.Process.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.file.rights"} case "signal.target.file.uid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -38474,6 +37995,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.change_time"} @@ -38484,6 +38009,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.filesystem"} @@ -38494,6 +38023,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := 
SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.gid"} @@ -38504,6 +38037,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.group"} @@ -38514,6 +38051,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.Signal.Target.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Signal.Target.Process.LinuxBinprm.FileEvent.Hashes, rv) @@ -38527,6 +38068,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.in_upper_layer"} @@ -38537,6 +38082,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.inode"} @@ 
-38547,6 +38096,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.mode"} @@ -38560,6 +38113,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.modification_time"} @@ -38570,6 +38127,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.mount_id"} @@ -38580,6 +38141,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.name"} @@ -38595,6 +38160,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.package.name", value) + if err != nil 
|| !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.package.name"} @@ -38605,6 +38174,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.package.source_version"} @@ -38615,6 +38188,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.package.version"} @@ -38625,6 +38202,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.path"} @@ -38640,19 +38221,15 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "signal.target.interpreter.file.rights"} - } - ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return 
&eval.ErrFieldReadOnly{Field: "signal.target.interpreter.file.rights"} case "signal.target.interpreter.file.uid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.uid"} @@ -38663,6 +38240,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Process.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.user"} @@ -38706,12 +38287,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } - rv, ok := value.(string) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args"} - } - ev.Signal.Target.Parent.Args = rv - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.parent.args"} case "signal.target.parent.args_flags": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -38719,15 +38295,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } - switch rv := value.(type) { - case string: - ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv) - case []string: - ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv...) 
- default: - return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args_flags"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.parent.args_flags"} case "signal.target.parent.args_options": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -38735,15 +38303,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } - switch rv := value.(type) { - case string: - ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv) - case []string: - ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv...) - default: - return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args_options"} - } - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.parent.args_options"} case "signal.target.parent.args_truncated": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -39250,15 +38810,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "signal.target.parent.file.rights"} - } - ev.Signal.Target.Parent.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.parent.file.rights"} case "signal.target.parent.file.uid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -39370,6 +38922,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.change_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return 
&eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.change_time"} @@ -39383,6 +38939,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.filesystem", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.filesystem"} @@ -39396,6 +38956,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.gid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.gid"} @@ -39409,6 +38973,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.group", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.group"} @@ -39422,6 +38990,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.hashes", value) + if err != nil || !cont { + return err + } switch rv := value.(type) { case string: ev.Signal.Target.Parent.LinuxBinprm.FileEvent.Hashes = append(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.Hashes, rv) @@ -39438,6 +39010,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if 
ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.in_upper_layer", value) + if err != nil || !cont { + return err + } rv, ok := value.(bool) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.in_upper_layer"} @@ -39451,6 +39027,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.inode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.inode"} @@ -39464,6 +39044,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.mode", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.mode"} @@ -39480,6 +39064,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.modification_time", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.modification_time"} @@ -39493,6 +39081,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.mount_id", value) + if err != nil || !cont { + return err + } rv, ok 
:= value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.mount_id"} @@ -39506,6 +39098,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.name"} @@ -39527,6 +39123,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.package.name", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.package.name"} @@ -39540,6 +39140,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.package.source_version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.package.source_version"} @@ -39553,6 +39157,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.package.version", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.package.version"} @@ -39566,6 +39174,10 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.path", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.path"} @@ -39587,15 +39199,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "signal.target.parent.interpreter.file.rights"} - } - ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "signal.target.parent.interpreter.file.rights"} case "signal.target.parent.interpreter.file.uid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} @@ -39603,6 +39207,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.uid", value) + if err != nil || !cont { + return err + } rv, ok := value.(int) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.uid"} @@ -39616,6 +39224,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { if ev.Signal.Target.Parent == nil { ev.Signal.Target.Parent = &Process{} } + cont, err := SetInterpreterFields(&ev.Signal.Target.Parent.LinuxBinprm, "file.user", value) + if err != nil || !cont { + return err + } rv, ok := value.(string) if !ok { return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.user"} @@ -39997,15 +39609,7 @@ func 
(ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "splice.file.path.length": return &eval.ErrFieldReadOnly{Field: "splice.file.path.length"} case "splice.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "splice.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "splice.file.rights"} - } - ev.Splice.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "splice.file.rights"} case "splice.file.uid": rv, ok := value.(int) if !ok { @@ -40157,15 +39761,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "unlink.file.path.length": return &eval.ErrFieldReadOnly{Field: "unlink.file.path.length"} case "unlink.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "unlink.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "unlink.file.rights"} - } - ev.Unlink.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "unlink.file.rights"} case "unlink.file.uid": rv, ok := value.(int) if !ok { @@ -40345,15 +39941,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "utimes.file.path.length": return &eval.ErrFieldReadOnly{Field: "utimes.file.path.length"} case "utimes.file.rights": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "utimes.file.rights"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "utimes.file.rights"} - } - ev.Utimes.File.FileFields.Mode = uint16(rv) - return nil + return &eval.ErrFieldReadOnly{Field: "utimes.file.rights"} case "utimes.file.uid": rv, ok := value.(int) if !ok { diff --git a/pkg/security/secl/model/accessors_windows.go b/pkg/security/secl/model/accessors_windows.go index 10f7a420f9af91..228ada283945f0 100644 --- a/pkg/security/secl/model/accessors_windows.go +++ 
b/pkg/security/secl/model/accessors_windows.go @@ -872,7 +872,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -896,7 +896,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -921,7 +922,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -946,7 +947,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -971,7 +972,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := 
&ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -997,7 +998,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1023,7 +1024,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1049,7 +1050,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1075,7 +1076,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1109,7 +1110,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return 
&eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1133,7 +1135,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1158,7 +1161,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1182,7 +1185,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1304,7 +1308,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessCmdLine(ev, 
ev.BaseEvent.ProcessContext.Parent) @@ -1318,7 +1321,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.ContainerID @@ -1332,7 +1334,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)) @@ -1346,7 +1347,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1360,7 +1360,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1375,7 +1374,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1401,7 +1399,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { 
- ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1426,7 +1423,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid) @@ -1440,7 +1436,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PPid) @@ -1454,7 +1449,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1468,7 +1462,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.OwnerSidString @@ -2024,9 +2017,6 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } ctx := eval.NewContext(ev) value := evaluator.Eval(ctx) - if ctx.Error != nil { - return nil, ctx.Error - } return value, nil } func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { diff --git a/pkg/security/secl/model/iterator.go b/pkg/security/secl/model/iterator.go index 34dd77752ccf0f..aba140d0c0eb96 100644 --- a/pkg/security/secl/model/iterator.go +++ b/pkg/security/secl/model/iterator.go @@ 
-6,7 +6,9 @@ // Package model holds model related files package model -import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" +import ( + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" +) // Iterator is a generic interface that iterators must implement type Iterator[T any] interface { @@ -22,21 +24,21 @@ func isNil[V comparable](v V) bool { return v == zero } -func newIterator[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { +func newIterator[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIterCb func(ev *Event, current V) T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { - results = append(results, perIter(ev, entry)) + results = append(results, perIterCb(ev, entry)) } ctx.IteratorCountCache[field] = len(results) return results } -func newIteratorArray[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { +func newIteratorArray[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIterCb func(ev *Event, current V) []T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) count := 0 for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { - results = append(results, perIter(ev, entry)...) + results = append(results, perIterCb(ev, entry)...) 
count++ } ctx.IteratorCountCache[field] = count diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index cb6666fb162598..c2b387482e5116 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -504,17 +504,16 @@ func NewProcessCacheEntry(coreRelease func(_ *ProcessCacheEntry)) *ProcessCacheE // ProcessAncestorsIterator defines an iterator of ancestors type ProcessAncestorsIterator struct { + Root *ProcessCacheEntry prev *ProcessCacheEntry } // Front returns the first element -func (it *ProcessAncestorsIterator) Front(ctx *eval.Context) *ProcessCacheEntry { - if front := ctx.Event.(*Event).ProcessContext.Ancestor; front != nil { - it.prev = front - return front +func (it *ProcessAncestorsIterator) Front(_ *eval.Context) *ProcessCacheEntry { + if it.Root != nil { + it.prev = it.Root } - - return nil + return it.prev } // Next returns the next element diff --git a/pkg/security/secl/model/model_helpers_unix.go b/pkg/security/secl/model/model_helpers_unix.go index 4c47e726fbe6fc..933b2e38a081e8 100644 --- a/pkg/security/secl/model/model_helpers_unix.go +++ b/pkg/security/secl/model/model_helpers_unix.go @@ -31,6 +31,9 @@ const ( // SizeOfCookie size of cookie SizeOfCookie = 8 + + // FakeInodeMSW inode used internally + fakeInodeMSW uint64 = 0xdeadc001 ) // check that all path are absolute @@ -114,6 +117,11 @@ func (m *Model) ValidateField(field eval.Field, fieldValue eval.FieldValue) erro return nil } +// IsFakeInode returns whether the given inode is a fake inode +func IsFakeInode(inode uint64) bool { + return inode>>32 == fakeInodeMSW +} + // SetPathResolutionError sets the Event.pathResolutionError func (ev *Event) SetPathResolutionError(fileFields *FileEvent, err error) { fileFields.PathResolutionError = err diff --git a/pkg/security/secl/model/model_test.go b/pkg/security/secl/model/model_test.go index 90aaff09ca770c..f85e38db904e8e 100644 --- a/pkg/security/secl/model/model_test.go +++ 
b/pkg/security/secl/model/model_test.go @@ -120,9 +120,11 @@ func TestPathValidation(t *testing.T) { func TestSetFieldValue(t *testing.T) { var readOnlyError *eval.ErrFieldReadOnly var fieldNotSupportedError *eval.ErrNotSupported - event := NewFakeEvent() for _, field := range event.GetFields() { + // use a fresh event to not get polluted by previous SetFieldValue + event = NewFakeEvent() + _, kind, err := event.GetFieldMetadata(field) if err != nil { t.Fatal(err) diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index 458a0b36cb3cb8..41448b7010e2e2 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -228,6 +228,15 @@ type LinuxBinprm struct { FileEvent FileEvent `field:"file"` } +// SetInterpreterFields set the proper field so that this will be seen as a valid interpreter, see HasInterpreter +func SetInterpreterFields(bprm *LinuxBinprm, subField string, _ interface{}) (bool, error) { + // set a fake inode so that the interpreter becomes valid + if bprm.FileEvent.Inode == 0 && subField != "file.inode" { + bprm.FileEvent.Inode = fakeInodeMSW + } + return true, nil +} + // Process represents a process type Process struct { PIDContext @@ -240,9 +249,9 @@ type Process struct { SpanID uint64 `field:"-"` TraceID utils.TraceID `field:"-"` - TTYName string `field:"tty_name"` // SECLDoc[tty_name] Definition:`Name of the TTY associated with the process` - Comm string `field:"comm"` // SECLDoc[comm] Definition:`Comm attribute of the process` - LinuxBinprm LinuxBinprm `field:"interpreter,check:HasInterpreter"` // Script interpreter as identified by the shebang + TTYName string `field:"tty_name"` // SECLDoc[tty_name] Definition:`Name of the TTY associated with the process` + Comm string `field:"comm"` // SECLDoc[comm] Definition:`Comm attribute of the process` + LinuxBinprm LinuxBinprm `field:"interpreter,check:HasInterpreter,set_handler:SetInterpreterFields"` // Script interpreter as 
identified by the shebang // pid_cache_t ForkTime time.Time `field:"fork_time,opts:getters_only|gen_getters"` @@ -270,7 +279,7 @@ type Process struct { // defined to generate accessors, ArgsTruncated and EnvsTruncated are used during by unmarshaller Argv0 string `field:"argv0,handler:ResolveProcessArgv0,weight:100"` // SECLDoc[argv0] Definition:`First argument of the process` - Args string `field:"args,handler:ResolveProcessArgs,weight:500,opts:skip_ad"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` + Args string `field:"args,handler:ResolveProcessArgs,weight:500,opts:skip_ad|readonly"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` Argv []string `field:"argv,handler:ResolveProcessArgv,weight:500; cmdargv,handler:ResolveProcessCmdArgv,opts:getters_only|gen_getters; args_flags,handler:ResolveProcessArgsFlags,opts:helper; args_options,handler:ResolveProcessArgsOptions,opts:helper"` // SECLDoc[argv] Definition:`Arguments of the process (as an array, excluding argv0)` Example:`exec.argv in ["127.0.0.1"]` Description:`Matches any process that has this IP address as one of its arguments.` SECLDoc[args_flags] Definition:`Flags in the process arguments` Example:`exec.args_flags in ["s"] && exec.args_flags in ["V"]` Description:`Matches any process with both "-s" and "-V" flags in its arguments. 
Also matches "-sV".` SECLDoc[args_options] Definition:`Argument of the process as options` Example:`exec.args_options in ["p=0-1024"]` Description:`Matches any process that has either "-p 0-1024" or "--p=0-1024" in its arguments.` ArgsTruncated bool `field:"args_truncated,handler:ResolveProcessArgsTruncated"` // SECLDoc[args_truncated] Definition:`Indicator of arguments truncation` Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` @@ -301,6 +310,14 @@ type Process struct { lineageError error `field:"-"` } +// SetAncestorFields force the process cache entry to be valid +func SetAncestorFields(pce *ProcessCacheEntry, subField string, _ interface{}) (bool, error) { + if subField != "is_kworker" { + pce.IsKworker = false + } + return true, nil +} + // ExecEvent represents a exec event type ExecEvent struct { SyscallContext @@ -818,6 +835,7 @@ type NetworkFlowMonitorEvent struct { // FlowsIterator defines an iterator of flows type FlowsIterator struct { + Root interface{} // not used, direct access from the event prev int } diff --git a/pkg/security/secl/model/model_windows.go b/pkg/security/secl/model/model_windows.go index 8fe667c85f2a64..83b80fe0e25f62 100644 --- a/pkg/security/secl/model/model_windows.go +++ b/pkg/security/secl/model/model_windows.go @@ -173,3 +173,8 @@ type ChangePermissionEvent struct { OldSd string `field:"old_sd,handler:ResolveOldSecurityDescriptor"` // SECLDoc[old_sd] Definition:`Original Security Descriptor of the object of which permission was changed` NewSd string `field:"new_sd,handler:ResolveNewSecurityDescriptor"` // SECLDoc[new_sd] Definition:`New Security Descriptor of the object of which permission was changed` } + +// SetAncestorFields force the process cache entry to be valid +func SetAncestorFields(_ *ProcessCacheEntry, _ string, _ interface{}) (bool, error) { + return true, nil +} diff --git a/pkg/security/seclwin/model/accessors_win.go 
b/pkg/security/seclwin/model/accessors_win.go index 72d3c6801658e9..7898098932dc5d 100644 --- a/pkg/security/seclwin/model/accessors_win.go +++ b/pkg/security/seclwin/model/accessors_win.go @@ -870,7 +870,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -894,7 +894,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -919,7 +920,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -944,7 +945,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -969,7 +970,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -995,7 +996,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1021,7 +1022,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1047,7 +1048,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1073,7 +1074,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if 
element == nil { @@ -1107,7 +1108,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1131,7 +1133,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1156,7 +1159,7 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - iterator := &ProcessAncestorsIterator{} + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1180,7 +1183,8 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - iterator := &ProcessAncestorsIterator{} + ev := ctx.Event.(*Event) + iterator := &ProcessAncestorsIterator{Root: ev.BaseEvent.ProcessContext.Ancestor} if regID != "" { element := iterator.At(ctx, regID, ctx.Registers[regID]) if element == nil { @@ -1302,7 +1306,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1316,7 +1319,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.ContainerID @@ -1330,7 +1332,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)) @@ -1344,7 +1345,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1358,7 +1358,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1373,7 +1372,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1399,7 +1397,6 @@ func (_ *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1424,7 +1421,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid) @@ -1438,7 +1434,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PPid) @@ -1452,7 +1447,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1466,7 +1460,6 @@ func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { - ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.OwnerSidString @@ -2022,9 +2015,6 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } ctx := eval.NewContext(ev) value := evaluator.Eval(ctx) - if ctx.Error != nil { - return nil, ctx.Error - } return value, nil } func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { diff --git 
a/pkg/security/seclwin/model/iterator.go b/pkg/security/seclwin/model/iterator.go index 34dd77752ccf0f..aba140d0c0eb96 100644 --- a/pkg/security/seclwin/model/iterator.go +++ b/pkg/security/seclwin/model/iterator.go @@ -6,7 +6,9 @@ // Package model holds model related files package model -import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" +import ( + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" +) // Iterator is a generic interface that iterators must implement type Iterator[T any] interface { @@ -22,21 +24,21 @@ func isNil[V comparable](v V) bool { return v == zero } -func newIterator[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { +func newIterator[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIterCb func(ev *Event, current V) T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { - results = append(results, perIter(ev, entry)) + results = append(results, perIterCb(ev, entry)) } ctx.IteratorCountCache[field] = len(results) return results } -func newIteratorArray[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { +func newIteratorArray[T any, V comparable](iter Iterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIterCb func(ev *Event, current V) []T) []T { results := make([]T, 0, ctx.IteratorCountCache[field]) count := 0 for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { - results = append(results, perIter(ev, entry)...) + results = append(results, perIterCb(ev, entry)...) 
count++ } ctx.IteratorCountCache[field] = count diff --git a/pkg/security/seclwin/model/model.go b/pkg/security/seclwin/model/model.go index cb6666fb162598..c2b387482e5116 100644 --- a/pkg/security/seclwin/model/model.go +++ b/pkg/security/seclwin/model/model.go @@ -504,17 +504,16 @@ func NewProcessCacheEntry(coreRelease func(_ *ProcessCacheEntry)) *ProcessCacheE // ProcessAncestorsIterator defines an iterator of ancestors type ProcessAncestorsIterator struct { + Root *ProcessCacheEntry prev *ProcessCacheEntry } // Front returns the first element -func (it *ProcessAncestorsIterator) Front(ctx *eval.Context) *ProcessCacheEntry { - if front := ctx.Event.(*Event).ProcessContext.Ancestor; front != nil { - it.prev = front - return front +func (it *ProcessAncestorsIterator) Front(_ *eval.Context) *ProcessCacheEntry { + if it.Root != nil { + it.prev = it.Root } - - return nil + return it.prev } // Next returns the next element diff --git a/pkg/security/seclwin/model/model_win.go b/pkg/security/seclwin/model/model_win.go index 8fe667c85f2a64..83b80fe0e25f62 100644 --- a/pkg/security/seclwin/model/model_win.go +++ b/pkg/security/seclwin/model/model_win.go @@ -173,3 +173,8 @@ type ChangePermissionEvent struct { OldSd string `field:"old_sd,handler:ResolveOldSecurityDescriptor"` // SECLDoc[old_sd] Definition:`Original Security Descriptor of the object of which permission was changed` NewSd string `field:"new_sd,handler:ResolveNewSecurityDescriptor"` // SECLDoc[new_sd] Definition:`New Security Descriptor of the object of which permission was changed` } + +// SetAncestorFields force the process cache entry to be valid +func SetAncestorFields(_ *ProcessCacheEntry, _ string, _ interface{}) (bool, error) { + return true, nil +} diff --git a/pkg/security/serializers/serializers_linux.go b/pkg/security/serializers/serializers_linux.go index d82db8d08abf60..08186f1d473a63 100644 --- a/pkg/security/serializers/serializers_linux.go +++ b/pkg/security/serializers/serializers_linux.go @@ 
-1123,7 +1123,7 @@ func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *Proc ctx := eval.NewContext(e) - it := &model.ProcessAncestorsIterator{} + it := &model.ProcessAncestorsIterator{Root: e.ProcessContext.Ancestor} ptr := it.Front(ctx) var ancestor *model.ProcessCacheEntry @@ -1185,7 +1185,7 @@ func newDDContextSerializer(e *model.Event) *DDContextSerializer { } ctx := eval.NewContext(e) - it := &model.ProcessAncestorsIterator{} + it := &model.ProcessAncestorsIterator{Root: e.ProcessContext.Ancestor} ptr := it.Front(ctx) for ptr != nil { diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index b599db65fa22db..47a88081777805 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -107,6 +107,7 @@ func SkipIfNotAvailable(t *testing.T) { "TestLoginUID/login-uid-exec-test", "TestActionKillExcludeBinary", "~TestActionKillDisarm", + "~TestProcessInterpreter", } if disableSeccomp { diff --git a/pkg/security/tests/process_test.go b/pkg/security/tests/process_test.go index 4b04d15851cd8a..00c66b2ed6591f 100644 --- a/pkg/security/tests/process_test.go +++ b/pkg/security/tests/process_test.go @@ -28,7 +28,6 @@ import ( "time" sprobe "github.com/DataDog/datadog-agent/pkg/security/probe" - "github.com/DataDog/datadog-agent/pkg/security/probe/constantfetch" "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -37,7 +36,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/syndtr/gocapability/capability" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -2022,18 +2020,18 @@ func TestProcessInterpreter(t *testing.T) { scriptName: "regularExec.sh", executedScript: fmt.Sprintf(`#!/bin/bash -echo "Executing echo 
insIDe a bash script" +echo "Executing echo inside a bash script" %s - << EOF -print('Executing print insIDe a python (%s) script insIDe a bash script') +print('Executing print inside a python (%s) script inside a bash script') EOF echo "Back to bash"`, python, python), check: func(event *model.Event) { - var fieldNotSupportedError *eval.ErrNotSupported - _, err := event.GetFieldValue("exec.interpreter.file.name") - assert.ErrorAs(t, err, &fieldNotSupportedError, "exec event shouldn't have an interpreter") + value, err := event.GetFieldValue("exec.interpreter.file.name") + assert.NoError(t, err) + assert.Empty(t, value, "exec event shouldn't have an interpreter") assertFieldEqual(t, event, "process.parent.file.name", "regularExec.sh", "wrong process parent file name") assertFieldStringArrayIndexedOneOf(t, event, "process.ancestors.file.name", 0, []string{"regularExec.sh"}, "ancestor file name not an option") }, @@ -2047,19 +2045,19 @@ echo "Back to bash"`, python, python), scriptName: "regularExecWithInterpreterRule.sh", executedScript: fmt.Sprintf(`#!/bin/bash -echo "Executing echo insIDe a bash script" +echo "Executing echo inside a bash script" %s <<__HERE__ #!%s -print('Executing print insIDe a python (%s) script insIDe a bash script') +print('Executing print inside a python (%s) script inside a bash script') __HERE__ echo "Back to bash"`, python, python, python), check: func(event *model.Event) { - var fieldNotSupportedError *eval.ErrNotSupported - _, err := event.GetFieldValue("exec.interpreter.file.name") - assert.ErrorAs(t, err, &fieldNotSupportedError, "exec event shouldn't have an interpreter") + value, err := event.GetFieldValue("exec.interpreter.file.name") + assert.NoError(t, err) + assert.Empty(t, value, "exec event shouldn't have an interpreter") assertFieldEqual(t, event, "process.parent.file.name", "regularExecWithInterpreterRule.sh", "wrong process parent file name") assertFieldStringArrayIndexedOneOf(t, event, "process.ancestors.file.name", 0, 
[]string{"regularExecWithInterpreterRule.sh"}, "ancestor file name not an option") }, @@ -2074,12 +2072,12 @@ echo "Back to bash"`, python, python, python), innerScriptName: "pyscript.py", executedScript: fmt.Sprintf(`#!/bin/bash -echo "Executing echo insIDe a bash script" +echo "Executing echo inside a bash script" cat << EOF > pyscript.py #!%s -print('Executing print insIDe a python (%s) script inside a bash script') +print('Executing print inside a python (%s) script inside a bash script') EOF @@ -2104,7 +2102,7 @@ chmod 755 pyscript.py // scriptName: "nestedInterpretedExec.sh", // executedScript: `#!/bin/bash // - //echo "Executing echo insIDe a bash script" + //echo "Executing echo inside a bash script" // //cat << '__HERE__' > hello.pl //#!/usr/bin/perl @@ -2121,7 +2119,7 @@ chmod 755 pyscript.py // //import subprocess // - //print('Executing print insIDe a python script') + //print('Executing print inside a python script') // //subprocess.run(["perl", "./hello.pl"]) // @@ -2146,11 +2144,6 @@ chmod 755 pyscript.py } defer testModule.Close() - p, ok := testModule.probe.PlatformProbe.(*sprobe.EBPFProbe) - if !ok { - t.Skip("not supported") - } - for _, test := range tests { testModule.Run(t, test.name, func(t *testing.T, _ wrapperType, _ func(cmd string, args []string, envs []string) *exec.Cmd) { scriptLocation := filepath.Join(os.TempDir(), test.scriptName) @@ -2169,9 +2162,6 @@ chmod 755 pyscript.py } t.Log(string(output)) - offsets, _ := p.GetOffsetConstants() - t.Logf("%s: %+v\n", constantfetch.OffsetNameLinuxBinprmStructFile, offsets[constantfetch.OffsetNameLinuxBinprmStructFile]) - return nil }, testModule.validateExecEvent(t, noWrapperType, func(event *model.Event, rule *rules.Rule) { assertTriggeredRule(t, rule, test.rule.ID) diff --git a/pkg/security/tests/schemas.go b/pkg/security/tests/schemas.go index 16b95fbc05aa6a..226e666e40122a 100644 --- a/pkg/security/tests/schemas.go +++ b/pkg/security/tests/schemas.go @@ -22,7 +22,6 @@ import ( 
"github.com/xeipuuv/gojsonschema" "github.com/DataDog/datadog-agent/pkg/security/events" - "github.com/DataDog/datadog-agent/pkg/security/resolvers/dentry" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/schemas" "github.com/DataDog/datadog-agent/pkg/security/serializers" @@ -322,7 +321,7 @@ func (v ValidInodeFormatChecker) IsFormat(input interface{}) bool { default: return false } - return !dentry.IsFakeInode(inode) + return !model.IsFakeInode(inode) } func validateSchema(t *testing.T, schemaLoader gojsonschema.JSONLoader, documentLoader gojsonschema.JSONLoader) (bool, error) { From 7d4d74fd93ed6708daf37216f8e5df2110e35de6 Mon Sep 17 00:00:00 2001 From: Stuart Geipel Date: Thu, 30 Jan 2025 16:07:26 -0500 Subject: [PATCH 83/97] [NPM-4131] Add netpath aggregator + e2e test (#33416) --- go.mod | 4 +- pkg/networkpath/payload/go.mod | 2 +- test/fakeintake/Dockerfile | 2 + .../aggregator/fixtures/netpath_bytes | Bin 0 -> 568 bytes .../aggregator/netpathAggregator.go | 68 +++++++++++ .../aggregator/netpathAggregator_test.go | 76 ++++++++++++ test/fakeintake/client/client.go | 24 ++++ test/fakeintake/client/client_test.go | 15 +++ .../client/fixtures/api_v2_netpath_response | 10 ++ test/fakeintake/go.mod | 4 + test/new-e2e/go.mod | 4 + test/new-e2e/pkg/environments/host.go | 6 +- .../network-path-integration/common_test.go | 62 ++++++++++ .../fake-traceroute/network_path.yaml | 8 ++ .../fake-traceroute/router_setup.sh | 47 ++++++++ .../fake-traceroute/router_teardown.sh | 8 ++ .../fake_traceroute_test.go | 109 ++++++++++++++++++ .../netpath_int_nix_test.go | 4 + .../netpath_int_win_test.go | 6 + 19 files changed, 455 insertions(+), 4 deletions(-) create mode 100644 test/fakeintake/aggregator/fixtures/netpath_bytes create mode 100644 test/fakeintake/aggregator/netpathAggregator.go create mode 100644 test/fakeintake/aggregator/netpathAggregator_test.go create mode 100644 
test/fakeintake/client/fixtures/api_v2_netpath_response create mode 100644 test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/network_path.yaml create mode 100644 test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_setup.sh create mode 100644 test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_teardown.sh create mode 100644 test/new-e2e/tests/netpath/network-path-integration/fake_traceroute_test.go diff --git a/go.mod b/go.mod index 8c8e11265e4594..472025f39f2961 100644 --- a/go.mod +++ b/go.mod @@ -577,8 +577,8 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.64.0-devel github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0 github.com/DataDog/datadog-agent/pkg/fips v0.0.0 // indirect - github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/pkg/networkpath/payload v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-20250128160050-7ac9ccd58c07 + github.com/DataDog/datadog-agent/pkg/networkpath/payload v0.0.0-20250128160050-7ac9ccd58c07 github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.64.0-devel github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.11 diff --git a/pkg/networkpath/payload/go.mod b/pkg/networkpath/payload/go.mod index d0e9b4de48c4d2..53f2b1942934b4 100644 --- a/pkg/networkpath/payload/go.mod +++ b/pkg/networkpath/payload/go.mod @@ -5,6 +5,6 @@ go 1.23.0 replace github.com/DataDog/datadog-agent/pkg/network/payload => ../../network/payload require ( - github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-20250128160050-7ac9ccd58c07 github.com/google/uuid v1.6.0 ) diff --git a/test/fakeintake/Dockerfile b/test/fakeintake/Dockerfile index eb19cdf969c03e..be078b9f78f6bb 100644 --- 
a/test/fakeintake/Dockerfile +++ b/test/fakeintake/Dockerfile @@ -15,6 +15,8 @@ COPY test/fakeintake/go.mod test/fakeintake/go.sum ./ # every datadog-agent module imported by the fakeintake has to be copied in the build image COPY pkg/proto /pkg/proto COPY comp/netflow/payload /comp/netflow/payload +COPY pkg/network/payload /pkg/network/payload +COPY pkg/networkpath/payload /pkg/networkpath/payload RUN go mod download diff --git a/test/fakeintake/aggregator/fixtures/netpath_bytes b/test/fakeintake/aggregator/fixtures/netpath_bytes new file mode 100644 index 0000000000000000000000000000000000000000..4b8073299d32209108dad2306d00d42c443e9be3 GIT binary patch literal 568 zcmV-80>}LyiwFP!00000|D2S~j-xmfh2Q0F2d@0f-*#SL7PD)Wkii#%irBO=?NOJG?;CUkilZ&AqLY^JJ~ca#ZWV?^RBc*u6W1t z&I~qZq^<0WUY3j?10SYwN|KJ^hY#;_7uk&Uigo!%aO@lG$1({%JC4-@0{#@-iui#v?dzs$HRA|4KkRP)>s>8P2y4^ zXC(?*AlEt_(T50)QZj=;sRwGUTiY8X*ZONeD^V)!dK|qUXWCW@y?2pZ#NHp1uY+G! zXj0*EhKBF7IYW+xK%J_*&2Py$%XwaiTnnULU}_;aGeZU;MratyI{7pG!E`f06T?R5 z?5ZPnI-7fU{=>BgzqC84{pbccn0fuplrNa_Ia9u3svT1(hU_cad*c1zl7K+2#q$XC zL(~k@bE%Ep(;sG6fvTvU3ltCDIWkmO7JKVA>lZM8?Y!KBd4WRK={wOV|Nj910RR8nHTIHK G1pojU&lC6n literal 0 HcmV?d00001 diff --git a/test/fakeintake/aggregator/netpathAggregator.go b/test/fakeintake/aggregator/netpathAggregator.go new file mode 100644 index 00000000000000..6f395cf0d43087 --- /dev/null +++ b/test/fakeintake/aggregator/netpathAggregator.go @@ -0,0 +1,68 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025-present Datadog, Inc. 
+ +package aggregator + +import ( + "bytes" + "encoding/json" + "fmt" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/payload" + "github.com/DataDog/datadog-agent/test/fakeintake/api" +) + +// Netpath represents a network path payload +type Netpath struct { + collectedTime time.Time + payload.NetworkPath +} + +func (p *Netpath) name() string { + return fmt.Sprintf("%s:%d %s", p.Destination.Hostname, p.Destination.Port, p.Protocol) +} + +// GetTags return the tags from a payload +func (p *Netpath) GetTags() []string { + return []string{} +} + +// GetCollectedTime return the time when the payload has been collected by the fakeintake server +func (p *Netpath) GetCollectedTime() time.Time { + return p.collectedTime +} + +// ParseNetpathPayload parses an api.Payload into a list of Netpath +func ParseNetpathPayload(payload api.Payload) (netpaths []*Netpath, err error) { + if len(payload.Data) == 0 || bytes.Equal(payload.Data, []byte("{}")) { + return []*Netpath{}, nil + } + enflated, err := enflate(payload.Data, payload.Encoding) + if err != nil { + return nil, err + } + netpaths = []*Netpath{} + err = json.Unmarshal(enflated, &netpaths) + if err != nil { + return nil, err + } + for _, n := range netpaths { + n.collectedTime = payload.Timestamp + } + return netpaths, err +} + +// NetpathAggregator is an Aggregator for netpath payloads +type NetpathAggregator struct { + Aggregator[*Netpath] +} + +// NewNetpathAggregator return a new NetpathAggregator +func NewNetpathAggregator() NetpathAggregator { + return NetpathAggregator{ + Aggregator: newAggregator(ParseNetpathPayload), + } +} diff --git a/test/fakeintake/aggregator/netpathAggregator_test.go b/test/fakeintake/aggregator/netpathAggregator_test.go new file mode 100644 index 00000000000000..f2f54f81f12db4 --- /dev/null +++ b/test/fakeintake/aggregator/netpathAggregator_test.go @@ -0,0 +1,76 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License 
Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package aggregator + +import ( + _ "embed" + "testing" + + "github.com/DataDog/datadog-agent/pkg/networkpath/payload" + "github.com/DataDog/datadog-agent/test/fakeintake/api" + "github.com/stretchr/testify/assert" +) + +//go:embed fixtures/netpath_bytes +var netpathData []byte + +func TestNetpathAggregator(t *testing.T) { + t.Run("ParseNetpathPayload should return empty Netpath array on empty data", func(t *testing.T) { + netpaths, err := ParseNetpathPayload(api.Payload{Data: []byte(""), Encoding: encodingEmpty}) + assert.NoError(t, err) + assert.Empty(t, netpaths) + }) + + t.Run("ParseNetpathPayload should return empty Netpath array on empty json object", func(t *testing.T) { + netpaths, err := ParseNetpathPayload(api.Payload{Data: []byte("{}"), Encoding: encodingJSON}) + assert.NoError(t, err) + assert.Empty(t, netpaths) + }) + + t.Run("ParseNetpathPayload should return a valid Netpath on valid payload", func(t *testing.T) { + netpaths, err := ParseNetpathPayload(api.Payload{Data: netpathData, Encoding: encodingGzip}) + assert.NoError(t, err) + + assert.Len(t, netpaths, 1) + np := netpaths[0] + + assert.Equal(t, int64(1737933404281), np.Timestamp) + assert.Equal(t, "7.64.0-devel+git.40.38beef2", np.AgentVersion) + assert.Equal(t, "default", np.Namespace) + assert.Equal(t, "da6f9055-b7df-41b0-bafd-0e5d3c6c370e", np.PathtraceID) + assert.Equal(t, payload.PathOrigin("network_path_integration"), np.Origin) + assert.Equal(t, payload.Protocol("TCP"), np.Protocol) + assert.Equal(t, "i-019fda1a9f830d95e", np.Source.Hostname) + assert.Equal(t, "subnet-091570395d476e9ce", np.Source.Via.Subnet.Alias) + assert.Equal(t, "vpc-029c0faf8f49dee8d", np.Source.NetworkID) + assert.Equal(t, "api.datadoghq.eu", np.Destination.Hostname) + assert.Equal(t, "34.107.236.155", np.Destination.IPAddress) + assert.Equal(t, uint16(443), 
np.Destination.Port) + assert.Equal(t, "155.236.107.34.bc.googleusercontent.com", np.Destination.ReverseDNSHostname) + + assert.Len(t, np.Hops, 9) + assert.Equal(t, payload.NetworkPathHop{ + TTL: 1, + IPAddress: "10.1.62.52", + Hostname: "ip-10-1-62-52.ec2.internal", + RTT: 0.39, + Reachable: true, + }, np.Hops[0]) + assert.Equal(t, payload.NetworkPathHop{ + TTL: 2, + IPAddress: "unknown_hop_2", + Hostname: "unknown_hop_2", + Reachable: false, + }, np.Hops[1]) + assert.Equal(t, payload.NetworkPathHop{ + TTL: 9, + IPAddress: "34.107.236.155", + Hostname: "155.236.107.34.bc.googleusercontent.com", + RTT: 2.864, + Reachable: true, + }, np.Hops[8]) + }) +} diff --git a/test/fakeintake/client/client.go b/test/fakeintake/client/client.go index 83a767596e6890..3df9e03fa4b2a6 100644 --- a/test/fakeintake/client/client.go +++ b/test/fakeintake/client/client.go @@ -80,6 +80,7 @@ const ( orchestratorManifestEndpoint = "/api/v2/orchmanif" metadataEndpoint = "/api/v1/metadata" ndmflowEndpoint = "/api/v2/ndmflow" + netpathEndpoint = "/api/v2/netpath" apmTelemetryEndpoint = "/api/v2/apmtelemetry" ) @@ -119,6 +120,7 @@ type Client struct { orchestratorManifestAggregator aggregator.OrchestratorManifestAggregator metadataAggregator aggregator.MetadataAggregator ndmflowAggregator aggregator.NDMFlowAggregator + netpathAggregator aggregator.NetpathAggregator serviceDiscoveryAggregator aggregator.ServiceDiscoveryAggregator } @@ -145,6 +147,7 @@ func NewClient(fakeIntakeURL string, opts ...Option) *Client { orchestratorManifestAggregator: aggregator.NewOrchestratorManifestAggregator(), metadataAggregator: aggregator.NewMetadataAggregator(), ndmflowAggregator: aggregator.NewNDMFlowAggregator(), + netpathAggregator: aggregator.NewNetpathAggregator(), serviceDiscoveryAggregator: aggregator.NewServiceDiscoveryAggregator(), } for _, opt := range opts { @@ -280,6 +283,14 @@ func (c *Client) getNDMFlows() error { return c.ndmflowAggregator.UnmarshallPayloads(payloads) } +func (c *Client) 
getNetpathEvents() error { + payloads, err := c.getFakePayloads(netpathEndpoint) + if err != nil { + return err + } + return c.netpathAggregator.UnmarshallPayloads(payloads) +} + // FilterMetrics fetches fakeintake on `/api/v2/series` endpoint and returns // metrics matching `name` and any [MatchOpt](#MatchOpt) options func (c *Client) FilterMetrics(name string, options ...MatchOpt[*aggregator.MetricSeries]) ([]*aggregator.MetricSeries, error) { @@ -879,6 +890,19 @@ func (c *Client) GetNDMFlows() ([]*aggregator.NDMFlow, error) { return ndmflows, nil } +// GetNetpathEvents returns the latest netpath events by destination +func (c *Client) GetNetpathEvents() ([]*aggregator.Netpath, error) { + err := c.getNetpathEvents() + if err != nil { + return nil, err + } + var netpaths []*aggregator.Netpath + for _, name := range c.netpathAggregator.GetNames() { + netpaths = append(netpaths, c.netpathAggregator.GetPayloadsByName(name)...) + } + return netpaths, nil +} + // filterPayload returns payloads matching any [MatchOpt](#MatchOpt) options func filterPayload[T aggregator.PayloadItem](payloads []T, options ...MatchOpt[T]) ([]T, error) { // apply filters one after the other diff --git a/test/fakeintake/client/client_test.go b/test/fakeintake/client/client_test.go index bf023eeb3687fa..c6325dfce67e9a 100644 --- a/test/fakeintake/client/client_test.go +++ b/test/fakeintake/client/client_test.go @@ -56,6 +56,9 @@ var apiV1Metadata []byte //go:embed fixtures/api_v2_ndmflow_response var apiV2NDMFlow []byte +//go:embed fixtures/api_v2_netpath_response +var apiV2Netpath []byte + //go:embed fixtures/api_v2_telemetry_response var apiV2Teleemtry []byte @@ -569,6 +572,18 @@ func TestClient(t *testing.T) { assert.Empty(t, ndmflows[0].AdditionalFields) }) + t.Run("getNetpathEvents", func(t *testing.T) { + ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(apiV2Netpath) + })) + defer ts.Close() + + client := NewClient(ts.URL) + err := 
client.getNetpathEvents() + require.NoError(t, err) + assert.True(t, client.netpathAggregator.ContainsPayloadName("api.datadoghq.eu:443 TCP")) + }) + t.Run("getServiceDiscoveries", func(t *testing.T) { ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(apiV2Teleemtry) diff --git a/test/fakeintake/client/fixtures/api_v2_netpath_response b/test/fakeintake/client/fixtures/api_v2_netpath_response new file mode 100644 index 00000000000000..8331a1aae4df6c --- /dev/null +++ b/test/fakeintake/client/fixtures/api_v2_netpath_response @@ -0,0 +1,10 @@ +{ + "payloads": [ + { + "timestamp": "2025-01-26T23:16:45.522649265Z", + "data": "H4sIAAAAAAAA/5yUzY6jOBSF3+VuB678y992XmAWs2uVkMEXgorYtDGpRSnv3nIq3Z0QJS31Agn5Wt85Pgfz7RPidKQ1muMCDS9lWUupmBIVz8CM5GJ7orBO3kEDJRYKWW7pRPM/4xRRMZRVRzQIyMCZI62L6QkasDSYbY6QwWLiIQbTUzvZNDDFUDOt8660Q654x/LODDZnpK3si16WjCADH6ZxSpKO4ocP723CtJOLNAYTk5sMluCj7/0MDfz/73+Qweq3kNQ/4eDXmOxAA1POeD1Yw009VJLZWif+aTJp37p1jmJ6M/NkVmiuKzmruS6ZrLVVZUF1T3A+Z7/MXE5yWvqcibpngxmqQdWWqLJwzsDSGif35fLei1kmtCYa68fDd6QNMpiW1lgbaE3iUiFnJQpZINc6HdGHCI1SMoNAqQdqrVvbGyTX+ms/K1Eq7HocvR9n2lYKvXeRXMTeH5Ovg19WaFLhcYaG77Q5Q46FQJ2qvM1vyTnLeV6IXAukXmBqITgzQwYhRmgYyjr5M/3BdDNBE8NG5+yqI3Y6m3t3/sO1B7+0O6n96AY5mHn9zZQvmPI5U75iqh1TKIbsK9p74v3gkoBArsXzCPRD1AzTc6mN3dMfZleBqqieCxQP3jkyVFgWe+s36xcwf+283IN5gULW6TvhfJ/LfnZ1LqoXzqt9NEqg0Dz9ZzjbVfkw+5k9K58L1H+6Yn9xl25KUY/Kb+e3HwAAAP//AQAA///bNfaSVQUAAA==", + "encoding": "gzip", + "content_type": "application/json" + } + ] +} diff --git a/test/fakeintake/go.mod b/test/fakeintake/go.mod index 65a87348c7adb9..b07bdd154cd0d8 100644 --- a/test/fakeintake/go.mod +++ b/test/fakeintake/go.mod @@ -5,12 +5,15 @@ go 1.23.0 // every datadog-agent module replaced in the fakeintake go.mod needs to be copied in the Dockerfile replace ( github.com/DataDog/datadog-agent/comp/netflow/payload => ../../comp/netflow/payload + github.com/DataDog/datadog-agent/pkg/network/payload => ../../pkg/network/payload + 
github.com/DataDog/datadog-agent/pkg/networkpath/payload => ../../pkg/networkpath/payload github.com/DataDog/datadog-agent/pkg/proto => ../../pkg/proto ) require ( github.com/DataDog/agent-payload/v5 v5.0.141 github.com/DataDog/datadog-agent/comp/netflow/payload v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/networkpath/payload v0.0.0-20250128160050-7ac9ccd58c07 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/zstd v1.5.6 github.com/benbjohnson/clock v1.3.5 @@ -28,6 +31,7 @@ require ( ) require ( + github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-20250128160050-7ac9ccd58c07 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index d1ec7304d0928b..62a3ec44a1e025 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -26,6 +26,8 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/network/payload => ../../pkg/network/payload + github.com/DataDog/datadog-agent/pkg/networkpath/payload => ../../pkg/networkpath/payload github.com/DataDog/datadog-agent/pkg/proto => ../../pkg/proto github.com/DataDog/datadog-agent/pkg/trace => ../../pkg/trace github.com/DataDog/datadog-agent/pkg/util/executable => ../../pkg/util/executable @@ -280,6 +282,7 @@ require ( require ( github.com/DataDog/datadog-agent/comp/core/tagger/types v0.64.0-devel + github.com/DataDog/datadog-agent/pkg/networkpath/payload v0.0.0-20250128160050-7ac9ccd58c07 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.6.0 github.com/aws/aws-sdk-go v1.55.6 @@ -291,6 +294,7 @@ require ( 
require ( github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/network/payload v0.0.0-20250128160050-7ac9ccd58c07 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/charmbracelet/x/ansi v0.6.0 // indirect github.com/charmbracelet/x/term v0.2.1 // indirect diff --git a/test/new-e2e/pkg/environments/host.go b/test/new-e2e/pkg/environments/host.go index fd07d96f960fa2..8be6a617c15f6b 100644 --- a/test/new-e2e/pkg/environments/host.go +++ b/test/new-e2e/pkg/environments/host.go @@ -64,8 +64,12 @@ func (e *Host) generateAndDownloadAgentFlare(outputDir string) (string, error) { // discard error, flare command might return error if there is no intake, but it the archive is still generated flareCommandOutput, err := e.Agent.Client.FlareWithError(agentclient.WithArgs([]string{"--email", "e2e-tests@datadog-agent", "--send", "--local"})) + lines := []string{flareCommandOutput} + if err != nil { + lines = append(lines, err.Error()) + } // on error, the flare output is in the error message - flareCommandOutput = strings.Join([]string{flareCommandOutput, err.Error()}, "\n") + flareCommandOutput = strings.Join(lines, "\n") // find .zip in flare command output // (?m) is a flag that allows ^ and $ to match the beginning and end of each line diff --git a/test/new-e2e/tests/netpath/network-path-integration/common_test.go b/test/new-e2e/tests/netpath/network-path-integration/common_test.go index 44a3fe90560c93..cc38d1f906736e 100644 --- a/test/new-e2e/tests/netpath/network-path-integration/common_test.go +++ b/test/new-e2e/tests/netpath/network-path-integration/common_test.go @@ -9,7 +9,9 @@ package networkpathintegration import ( _ "embed" "fmt" + "time" + "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" fakeintakeclient "github.com/DataDog/datadog-agent/test/fakeintake/client" 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -54,3 +56,63 @@ func assertMetrics(fakeIntake *components.FakeIntake, c *assert.CollectT, metric assert.NotEmpty(c, metrics, fmt.Sprintf("metric with tags `%v` not found", tags)) } } + +func (s *baseNetworkPathIntegrationTestSuite) findNetpath(isMatch func(*aggregator.Netpath) bool) (*aggregator.Netpath, error) { + nps, err := s.Env().FakeIntake.Client().GetNetpathEvents() + if err != nil { + return nil, err + } + if nps == nil { + return nil, fmt.Errorf("GetNetpathEvents() returned nil netpaths") + } + + var match *aggregator.Netpath + for _, np := range nps { + if isMatch(np) { + match = np + } + } + return match, nil +} +func (s *baseNetworkPathIntegrationTestSuite) expectNetpath(c *assert.CollectT, isMatch func(*aggregator.Netpath) bool) *aggregator.Netpath { + np, err := s.findNetpath(isMatch) + require.NoError(c, err) + + require.NotNil(c, np, "could not find matching netpath") + return np +} + +func assertPayloadBase(c *assert.CollectT, np *aggregator.Netpath, hostname string) { + assert.Equal(c, payload.PathOrigin("network_path_integration"), np.Origin) + assert.NotEmpty(c, np.PathtraceID) + assert.Equal(c, "default", np.Namespace) + + // check that the timestamp is reasonably close to the current time + tolerance := time.Hour + assert.Greater(c, np.Timestamp, time.Now().Add(-tolerance).UnixMilli()) + assert.Less(c, np.Timestamp, time.Now().Add(tolerance).UnixMilli()) + + assert.Equal(c, hostname, np.Source.Hostname) +} + +func (s *baseNetworkPathIntegrationTestSuite) checkDatadogEUTCP(c *assert.CollectT, agentHostname string) { + np := s.expectNetpath(c, func(np *aggregator.Netpath) bool { + return np.Destination.Hostname == "api.datadoghq.eu" && np.Protocol == "TCP" + }) + assert.Equal(c, uint16(443), np.Destination.Port) + + assertPayloadBase(c, np, agentHostname) + + assert.NotEmpty(c, np.Hops) +} + +func (s *baseNetworkPathIntegrationTestSuite) checkGoogleDNSUDP(c *assert.CollectT, 
agentHostname string) { + np := s.expectNetpath(c, func(np *aggregator.Netpath) bool { + return np.Destination.Hostname == "8.8.8.8" && np.Protocol == "UDP" + }) + assert.NotZero(c, np.Destination.Port) + + assertPayloadBase(c, np, agentHostname) + + assert.NotEmpty(c, np.Hops) +} diff --git a/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/network_path.yaml b/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/network_path.yaml new file mode 100644 index 00000000000000..4ca490afe2b072 --- /dev/null +++ b/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/network_path.yaml @@ -0,0 +1,8 @@ +init_config: + min_collection_interval: 10 +instances: + - hostname: 198.51.100.2 + protocol: TCP + port: 443 + - hostname: 198.51.100.2 + protocol: UDP diff --git a/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_setup.sh b/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_setup.sh new file mode 100644 index 00000000000000..9bfcb7cdf1d038 --- /dev/null +++ b/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_setup.sh @@ -0,0 +1,47 @@ +#!/bin/sh +set -e + +# ASCII art map of namespaces and interfaces: +# | | +# default | router | endpoint +# | | +# veth0 ------> veth1 veth2 ---------> veth3 +# 192.0.2.1 | 192.0.2.2 198.51.100.1| 198.51.100.2 +# | | +#-------------------------------------------------------------------- + +# create namespaces +ip netns add router +ip netns add endpoint + +# create two veth pairs, the router namespace will route veth1 -> veth2 +ip link add veth0 type veth peer name veth1 +ip link add veth2 type veth peer name veth3 + +# move interfaces into namespaces +ip link set veth1 netns router +ip link set veth2 netns router +ip link set veth3 netns endpoint + +# assign IPs from the TEST-NET-1 CIDR block +ip addr add 192.0.2.1/24 dev veth0 +ip link set veth0 up + +ip netns exec router ip addr add 192.0.2.2/24 dev veth1 +ip netns exec 
router ip link set veth1 up + +# endpoint side has TEST-NET-2 IPs +ip netns exec router ip addr add 198.51.100.1/24 dev veth2 +ip netns exec router ip link set veth2 up + +ip netns exec router ip link set lo up + +ip netns exec endpoint ip addr add 198.51.100.2/24 dev veth3 +ip netns exec endpoint ip link set veth3 up + +ip netns exec endpoint ip link set lo up + +# route the packets inside the router namespace and access the gateway using veth0 +ip netns exec router sysctl -w net.ipv4.ip_forward=1 +ip netns exec endpoint ip route add default via 198.51.100.1 +ip route add 198.51.100.0/24 via 192.0.2.2 dev veth0 diff --git a/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_teardown.sh b/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_teardown.sh new file mode 100644 index 00000000000000..99f59125d36606 --- /dev/null +++ b/test/new-e2e/tests/netpath/network-path-integration/fake-traceroute/router_teardown.sh @@ -0,0 +1,8 @@ +#!/bin/sh +# no set -e because these may or may not exist +ip link delete veth0 +ip link delete veth1 +ip link delete veth2 +ip link delete veth3 +ip netns delete router +ip netns delete endpoint diff --git a/test/new-e2e/tests/netpath/network-path-integration/fake_traceroute_test.go b/test/new-e2e/tests/netpath/network-path-integration/fake_traceroute_test.go new file mode 100644 index 00000000000000..e45c13c63a0d75 --- /dev/null +++ b/test/new-e2e/tests/netpath/network-path-integration/fake_traceroute_test.go @@ -0,0 +1,109 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package netpath contains e2e tests for Network Path Integration feature +package networkpathintegration + +import ( + _ "embed" + "testing" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/networkpath/payload" + "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" +) + +//go:embed fake-traceroute/network_path.yaml +var fakeNetworkPathYaml []byte + +//go:embed fake-traceroute/router_setup.sh +var fakeRouterSetupScript []byte + +//go:embed fake-traceroute/router_teardown.sh +var fakeRouterTeardownScript []byte + +type fakeTracerouteTestSuite struct { + baseNetworkPathIntegrationTestSuite +} + +func TestFakeTracerouteSuite(t *testing.T) { + t.Parallel() + e2e.Run(t, &fakeTracerouteTestSuite{}, e2e.WithProvisioner(awshost.Provisioner( + awshost.WithAgentOptions( + agentparams.WithSystemProbeConfig(string(sysProbeConfig)), + agentparams.WithIntegration("network_path.d", string(fakeNetworkPathYaml)), + agentparams.WithFile("/tmp/router_setup.sh", string(fakeRouterSetupScript), false), + agentparams.WithFile("/tmp/router_teardown.sh", string(fakeRouterTeardownScript), false), + )), + )) + +} + +func (s *fakeTracerouteTestSuite) TestFakeTraceroute() { + t := s.T() + t.Cleanup(func() { + s.Env().RemoteHost.MustExecute("sudo sh /tmp/router_teardown.sh") + }) + s.Env().RemoteHost.MustExecute("sudo sh /tmp/router_setup.sh") + + routerIP := "192.0.2.2" + targetIP := "198.51.100.2" + + hostname := s.Env().Agent.Client.Hostname() + + validatePath := func(c *assert.CollectT, np *aggregator.Netpath) { + assert.Equal(c, payload.PathOrigin("network_path_integration"), np.Origin) + assert.NotEmpty(c, np.PathtraceID) + assert.Equal(c, "default", np.Namespace) + + // check that the timestamp is reasonably close to 
the current time + tolerance := time.Hour + assert.Greater(c, np.Timestamp, time.Now().Add(-tolerance).UnixMilli()) + assert.Less(c, np.Timestamp, time.Now().Add(tolerance).UnixMilli()) + + assert.Equal(c, hostname, np.Source.Hostname) + assert.Equal(c, targetIP, np.Destination.Hostname) + assert.Equal(c, targetIP, np.Destination.IPAddress) + assert.NotZero(c, np.Destination.Port) + + if !assert.Len(c, np.Hops, 2) { + return + } + + assert.Equal(c, 1, np.Hops[0].TTL) + assert.Equal(c, routerIP, np.Hops[0].IPAddress) + assert.Equal(c, routerIP, np.Hops[0].Hostname) + assert.True(c, np.Hops[0].Reachable) + + assert.Equal(c, 2, np.Hops[1].TTL) + assert.Equal(c, targetIP, np.Hops[1].IPAddress) + assert.Equal(c, targetIP, np.Hops[1].Hostname) + assert.True(c, np.Hops[1].Reachable) + } + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + nps, err := s.Env().FakeIntake.Client().GetNetpathEvents() + assert.NoError(c, err, "GetNetpathEvents() errors") + if !assert.NotNil(c, nps, "GetNetpathEvents() returned nil netpaths") { + return + } + + udpPath := s.expectNetpath(c, func(np *aggregator.Netpath) bool { + return np.Destination.Hostname == targetIP && np.Protocol == "UDP" + }) + tcpPath := s.expectNetpath(c, func(np *aggregator.Netpath) bool { + return np.Destination.Hostname == targetIP && np.Protocol == "TCP" + }) + + validatePath(c, udpPath) + validatePath(c, tcpPath) + assert.Equal(c, uint16(443), tcpPath.Destination.Port) + }, 2*time.Minute, 3*time.Second) +} diff --git a/test/new-e2e/tests/netpath/network-path-integration/netpath_int_nix_test.go b/test/new-e2e/tests/netpath/network-path-integration/netpath_int_nix_test.go index 964382b9af13a5..41358ddf385f98 100644 --- a/test/new-e2e/tests/netpath/network-path-integration/netpath_int_nix_test.go +++ b/test/new-e2e/tests/netpath/network-path-integration/netpath_int_nix_test.go @@ -36,10 +36,14 @@ func TestLinuxNetworkPathIntegrationSuite(t *testing.T) { func (s *linuxNetworkPathIntegrationTestSuite) 
TestLinuxNetworkPathIntegrationMetrics() { fakeIntake := s.Env().FakeIntake + hostname := s.Env().Agent.Client.Hostname() s.EventuallyWithT(func(c *assert.CollectT) { assertMetrics(fakeIntake, c, [][]string{ testAgentRunningMetricTagsTCP, testAgentRunningMetricTagsUDP, }) + + s.checkDatadogEUTCP(c, hostname) + s.checkGoogleDNSUDP(c, hostname) }, 5*time.Minute, 3*time.Second) } diff --git a/test/new-e2e/tests/netpath/network-path-integration/netpath_int_win_test.go b/test/new-e2e/tests/netpath/network-path-integration/netpath_int_win_test.go index ae449f2b8ca5bc..64e167db8dd97c 100644 --- a/test/new-e2e/tests/netpath/network-path-integration/netpath_int_win_test.go +++ b/test/new-e2e/tests/netpath/network-path-integration/netpath_int_win_test.go @@ -38,11 +38,17 @@ func TestWindowsNetworkPathIntegrationSuite(t *testing.T) { func (s *windowsNetworkPathIntegrationTestSuite) TestWindowsNetworkPathIntegrationMetrics() { fakeIntake := s.Env().FakeIntake + hostname := s.Env().Agent.Client.Hostname() s.EventuallyWithT(func(c *assert.CollectT) { assertMetrics(fakeIntake, c, [][]string{ testAgentRunningMetricTagsTCP, // TODO: Test UDP once implemented for windows, uncomment line below //testAgentRunningMetricTagsUDP, }) + + s.checkDatadogEUTCP(c, hostname) + // TODO: Test UDP once implemented for windows, uncomment line below + // s.checkGoogleDNSUDP(c, hostname) + }, 5*time.Minute, 3*time.Second) } From 613673778be6a83e2dbc0125ae7503bf1ae3a3a1 Mon Sep 17 00:00:00 2001 From: Caleb Metz <135133572+cmetz100@users.noreply.github.com> Date: Thu, 30 Jan 2025 17:53:49 -0500 Subject: [PATCH 84/97] Upgrade lading to `0.25.5` and update trace-agent expvar config in `lading.yaml` (#33593) Signed-off-by: Caleb Metz --- test/regression/cases/quality_gate_idle/lading/lading.yaml | 2 +- .../cases/quality_gate_idle_all_features/lading/lading.yaml | 2 +- test/regression/config.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/test/regression/cases/quality_gate_idle/lading/lading.yaml b/test/regression/cases/quality_gate_idle/lading/lading.yaml index 5e2eb2566ef45f..ebc1dede1809d3 100644 --- a/test/regression/cases/quality_gate_idle/lading/lading.yaml +++ b/test/regression/cases/quality_gate_idle/lading/lading.yaml @@ -16,7 +16,7 @@ target_metrics: tags: sub_agent: "process" - expvar: #trace agent telemetry - uri: "http://127.0.0.1:5012/debug/vars" + uri: "https://127.0.0.1:5012/debug/vars" vars: - "/Event" - "/ServiceCheck" diff --git a/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml b/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml index 666731d6778a90..0e855694d064f7 100644 --- a/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml +++ b/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml @@ -18,7 +18,7 @@ target_metrics: tags: sub_agent: "process" - expvar: #trace agent telemetry - uri: "http://127.0.0.1:5012/debug/vars" + uri: "https://127.0.0.1:5012/debug/vars" vars: - "/Event" - "/ServiceCheck" diff --git a/test/regression/config.yaml b/test/regression/config.yaml index 8c4f568a4dfb23..3621c3c48f8401 100644 --- a/test/regression/config.yaml +++ b/test/regression/config.yaml @@ -1,5 +1,5 @@ lading: - version: 0.25.4 + version: 0.25.5 target: From 91794f654cad0d5a4ab1b455dfe626f30d8cb890 Mon Sep 17 00:00:00 2001 From: Scott Opell Date: Thu, 30 Jan 2025 18:03:21 -0500 Subject: [PATCH 85/97] Lowers SMP memory limits as a result of #33528 (#33598) --- test/regression/cases/quality_gate_idle/experiment.yaml | 2 +- .../cases/quality_gate_idle_all_features/experiment.yaml | 2 +- test/regression/cases/quality_gate_logs/experiment.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/regression/cases/quality_gate_idle/experiment.yaml b/test/regression/cases/quality_gate_idle/experiment.yaml index 48c488f551fcab..c97b662c817830 100644 --- 
a/test/regression/cases/quality_gate_idle/experiment.yaml +++ b/test/regression/cases/quality_gate_idle/experiment.yaml @@ -35,7 +35,7 @@ checks: description: "Memory usage quality gate. This puts a bound on the total agent memory usage." bounds: series: total_rss_bytes - upper_bound: "365.0 MiB" + upper_bound: "359.0 MiB" - name: intake_connections description: "Connections established to intake APIs. This puts a bound on total connections per Agent instance." diff --git a/test/regression/cases/quality_gate_idle_all_features/experiment.yaml b/test/regression/cases/quality_gate_idle_all_features/experiment.yaml index 839e36ad967969..ae371504ee3006 100644 --- a/test/regression/cases/quality_gate_idle_all_features/experiment.yaml +++ b/test/regression/cases/quality_gate_idle_all_features/experiment.yaml @@ -51,7 +51,7 @@ checks: description: "Memory usage quality gate. This puts a bound on the total agent memory usage." bounds: series: total_rss_bytes - upper_bound: "734.0 MiB" + upper_bound: "724.0 MiB" - name: intake_connections description: "Connections established to intake APIs. This puts a bound on total connections per Agent instance." 
diff --git a/test/regression/cases/quality_gate_logs/experiment.yaml b/test/regression/cases/quality_gate_logs/experiment.yaml index 59d969e257ced3..c1c4b3c42e9d6e 100644 --- a/test/regression/cases/quality_gate_logs/experiment.yaml +++ b/test/regression/cases/quality_gate_logs/experiment.yaml @@ -28,7 +28,7 @@ checks: description: "Memory usage" bounds: series: total_rss_bytes - upper_bound: 419MiB + upper_bound: "406.0 MiB" - name: lost_bytes description: "Allowable bytes not polled by log Agent" From b6b1c510e74e40e6161b8f11a42c5f3291376c6f Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Fri, 31 Jan 2025 09:18:48 +0100 Subject: [PATCH 86/97] fix(slack): Use a non-private channel (#33583) --- tasks/libs/pipeline/github_slack_review_map.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/libs/pipeline/github_slack_review_map.yaml b/tasks/libs/pipeline/github_slack_review_map.yaml index 58a93bd2347a42..c93a70bdd338e4 100644 --- a/tasks/libs/pipeline/github_slack_review_map.yaml +++ b/tasks/libs/pipeline/github_slack_review_map.yaml @@ -32,7 +32,7 @@ '@datadog/windows-agent': '#windows-agent-reviews' '@datadog/windows-kernel-integrations': '#windows-kernel-integrations-reviews' '@datadog/opentelemetry': '#opentelemetry' -'@datadog/agent-e2e-testing': '#agent-e2e-framework-devs' +'@datadog/agent-e2e-testing': '#agent-e2e-framework-help' '@datadog/software-integrity-and-trust': '#sit' '@datadog/single-machine-performance': '#single-machine-performance' '@datadog/agent-integrations': '#agent-integrations' From e70c16d5fbd13ce658d0aabce78f9921e9b0b95c Mon Sep 17 00:00:00 2001 From: Jonathan Ribas Date: Fri, 31 Jan 2025 10:31:00 +0100 Subject: [PATCH 87/97] Fix: prevent dlopen calls when compiled statically (#33485) --- pkg/util/system/dlopen_linux.go | 2 +- pkg/util/system/dlopen_other.go | 2 +- tasks/system_probe.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/util/system/dlopen_linux.go 
b/pkg/util/system/dlopen_linux.go index bfc3e9e273a5e1..50eea89c4e72d0 100644 --- a/pkg/util/system/dlopen_linux.go +++ b/pkg/util/system/dlopen_linux.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && cgo +//go:build linux && cgo && !static package system diff --git a/pkg/util/system/dlopen_other.go b/pkg/util/system/dlopen_other.go index 51a4e1d97c1169..0362b47898244b 100644 --- a/pkg/util/system/dlopen_other.go +++ b/pkg/util/system/dlopen_other.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build !linux || !cgo +//go:build !linux || !cgo || static package system diff --git a/tasks/system_probe.py b/tasks/system_probe.py index 499b2ad2e82fcd..567abcf0176c5e 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -762,7 +762,7 @@ def build_sysprobe_binary( ldflags += ' -s -w' if static: - build_tags.extend(["osusergo", "netgo"]) + build_tags.extend(["osusergo", "netgo", "static"]) build_tags = list(set(build_tags).difference({"netcgo"})) if not is_windows and "pcap" in build_tags: From ee4c0bd87f6313c7ee7ad6b0b88d4cd40add57fd Mon Sep 17 00:00:00 2001 From: maxime mouial Date: Fri, 31 Jan 2025 10:53:17 +0100 Subject: [PATCH 88/97] Update CODEOWNER following agent-shared-components split (#33477) --- .ddqa/config.toml | 18 ++ .github/CODEOWNERS | 165 +++++++++--------- .github/dependabot.yaml | 2 +- .github/workflows/buildimages-update.yml | 2 +- .github/workflows/update_dependencies.yml | 2 +- .gitlab-ci.yml | 14 +- .gitlab/JOBOWNERS | 2 +- .gitlab/e2e/e2e.yml | 23 ++- comp/README.md | 24 ++- comp/agent/autoexit/component.go | 2 +- comp/agent/bundle.go | 2 +- comp/agent/expvarserver/component.go | 2 +- .../diagnosesendermanager/component.go | 2 +- comp/api/api/def/component.go | 2 +- comp/api/authtoken/component.go | 2 +- comp/api/bundle.go 
| 2 +- comp/core/agenttelemetry/def/component.go | 2 +- comp/core/bundle.go | 2 +- comp/core/bundle_mock.go | 2 +- comp/core/config/component.go | 2 +- comp/core/configsync/component.go | 2 +- comp/core/flare/component.go | 2 +- comp/core/gui/component.go | 2 +- comp/core/healthprobe/def/component.go | 2 +- comp/core/hostname/component.go | 2 +- .../hostname/hostnameinterface/component.go | 2 +- comp/core/log/def/component.go | 2 +- comp/core/lsof/def/component.go | 2 +- comp/core/pid/component.go | 2 +- .../core/remoteagentregistry/def/component.go | 2 +- comp/core/secrets/component.go | 2 +- comp/core/settings/component.go | 2 +- comp/core/status/component.go | 2 +- comp/core/telemetry/component.go | 2 +- comp/metadata/bundle.go | 2 +- comp/metadata/host/component.go | 2 +- comp/metadata/host/component_mock.go | 2 +- comp/metadata/inventoryagent/component.go | 2 +- comp/metadata/inventorychecks/component.go | 2 +- comp/metadata/inventoryhost/component.go | 2 +- comp/metadata/inventoryotel/component.go | 2 +- comp/metadata/resources/component.go | 2 +- comp/metadata/runner/component.go | 2 +- comp/metadata/securityagent/def/component.go | 2 +- comp/metadata/systemprobe/def/component.go | 2 +- docs/public/components/creating-components.md | 2 +- tasks/components.py | 2 +- tasks/libs/issue/assign.py | 3 +- tasks/libs/issue/model/constants.py | 3 +- tasks/libs/pipeline/github_jira_map.yaml | 3 +- tasks/libs/pipeline/github_slack_map.yaml | 3 +- .../pipeline/github_slack_review_map.yaml | 3 +- tasks/linter.py | 3 +- tasks/unit_tests/components_tests.py | 8 +- tasks/unit_tests/issue_tests.py | 2 +- tasks/unit_tests/junit_tests.py | 4 +- tasks/unit_tests/release_tests.py | 4 +- .../components_src/comp/classic/component.go | 2 +- .../components_src/comp/group/bundle.go | 2 +- .../comp/group/inbundle/def/component.go | 2 +- .../comp/multiple/def/component.go | 2 +- .../comp/newstyle/def/component.go | 2 +- .../agentenv_file_permissions_test.go | 2 +- 
.../agentenv_file_permissions_win_test.go | 2 +- .../api/api_test.go | 2 +- .../config-refresh/config_endpoint.go | 0 .../config-refresh/docs.go | 0 .../config-refresh/fixtures/config.yaml.tmpl | 0 .../fixtures/secret-resolver.py | 0 .../fixtures/security-agent.yaml | 0 .../non_core_agents_sync_common.go | 0 .../non_core_agents_sync_nix_test.go | 2 +- .../non_core_agents_sync_win_test.go | 2 +- .../gui/gui_common.go | 0 .../gui/gui_nix_test.go | 0 .../gui/gui_win_test.go | 0 .../inventory/inventory_agent_test.go | 0 .../secret/fixtures/secret_script.py | 0 .../secret/secret_common_test.go | 0 .../secret/secret_nix_test.go | 2 +- .../secret/secret_win_test.go | 2 +- .../secretsutils/client.go | 0 .../secretsutils/fixtures/secret-resolver.py | 0 .../secretsutils/fixtures/secret_wrapper.bat | 0 .../secretsutils/helpers.go | 0 .../forwarder/nss_failover_test.go | 0 .../forwarder/testfixtures/config.yaml.tmpl | 0 .../testfixtures/custom_logs.yaml.tmpl | 0 .../hostname/imdsv2_transition_common_test.go | 0 .../ipc/docs.go | 0 .../ipc/fixtures/config.yaml.tmpl | 0 .../ipc/fixtures/security-agent.yaml | 0 .../ipc/ipc_security_common.go | 0 .../ipc/ipc_security_nix_test.go | 0 .../ipc/ipc_security_win_test.go | 0 .../secret/secret_nix_test.go | 2 +- .../secret/secret_win_test.go | 2 +- test/new-e2e/tests/apm/vm_test.go | 2 +- 98 files changed, 230 insertions(+), 164 deletions(-) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/api/api_test.go (99%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/config_endpoint.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/docs.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/fixtures/config.yaml.tmpl (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/fixtures/secret-resolver.py (100%) rename 
test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/fixtures/security-agent.yaml (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/non_core_agents_sync_common.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/non_core_agents_sync_nix_test.go (99%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/config-refresh/non_core_agents_sync_win_test.go (99%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/gui/gui_common.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/gui/gui_nix_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/gui/gui_win_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/inventory/inventory_agent_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secret/fixtures/secret_script.py (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secret/secret_common_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secret/secret_nix_test.go (97%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secret/secret_win_test.go (98%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secretsutils/client.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secretsutils/fixtures/secret-resolver.py (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secretsutils/fixtures/secret_wrapper.bat (100%) rename test/new-e2e/tests/{agent-shared-components => agent-configuration}/secretsutils/helpers.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/forwarder/nss_failover_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => 
agent-runtimes}/forwarder/testfixtures/config.yaml.tmpl (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/forwarder/testfixtures/custom_logs.yaml.tmpl (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/hostname/imdsv2_transition_common_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/ipc/docs.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/ipc/fixtures/config.yaml.tmpl (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/ipc/fixtures/security-agent.yaml (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/ipc/ipc_security_common.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/ipc/ipc_security_nix_test.go (100%) rename test/new-e2e/tests/{agent-shared-components => agent-runtimes}/ipc/ipc_security_win_test.go (100%) diff --git a/.ddqa/config.toml b/.ddqa/config.toml index b19ea5b7a576a1..49035c733c47ea 100644 --- a/.ddqa/config.toml +++ b/.ddqa/config.toml @@ -42,6 +42,24 @@ github_team = "agent-shared-components" github_labels = ["team/agent-shared-components"] exclude_members = ["sgnn7", "truthbk", "cmourot"] +# Those will be uncommented once the proper issue types are created in JIRA. 
+# +# [teams."Agent Runtimes"] +# jira_project = "ARUN" +# jira_issue_type = "QA" +# jira_statuses = ["To Do", "In Progress", "Done"] +# github_team = "agent-runtimes" +# github_labels = ["team/agent-runtimes"] +# exclude_members = ["sgnn7", "cmourot"] +# +# [teams."Agent Configuration"] +# jira_project = "ACFG" +# jira_issue_type = "QA" +# jira_statuses = ["To Do", "In Progress", "Done"] +# github_team = "agent-configuration" +# github_labels = ["team/agent-configuration"] +# exclude_members = ["sgnn7"] + [teams."Agent Developer Tools"] jira_project = "ADXT" jira_issue_type = "QA" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b6516eeb7033bd..1246d2cb2f9da8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -16,7 +16,7 @@ /.* @DataDog/agent-devx-infra # changing the config of mockery will regenerate the mocks, so owners will have to review anyway /.mockery.yaml # do not notify anyone -/.go-version @DataDog/agent-shared-components @DataDog/agent-delivery +/.go-version @DataDog/agent-runtimes @DataDog/agent-delivery # Go linters and pre-commit config /.golangci.yml @DataDog/agent-devx-loops /.custom-gcl.yml @DataDog/agent-devx-loops @@ -45,9 +45,9 @@ /service.datadog.yaml @DataDog/agent-delivery /static-analysis.datadog.yml @DataDog/software-integrity-and-trust @DataDog/sdlc-security @DataDog/agent-devx-infra -/modules.yml @DataDog/agent-shared-components +/modules.yml @DataDog/agent-runtimes # if go.work changes then either .go-version or modules.yml changed too, so ASC might as well own it -/go.work @DataDog/agent-shared-components +/go.work @DataDog/agent-runtimes /.circleci/ @DataDog/agent-devx-infra @@ -58,10 +58,10 @@ /.github/workflows/serverless-binary-size.yml @DataDog/serverless @Datadog/serverless-aws /.github/workflows/serverless-integration.yml @DataDog/serverless @Datadog/serverless-aws /.github/workflows/cws-btfhub-sync.yml @DataDog/agent-security -/.github/workflows/gohai.yml @DataDog/agent-shared-components 
-/.github/workflows/go-update-commenter.yml @DataDog/agent-shared-components -/.github/workflows/update_dependencies.yml @DataDog/agent-shared-components -/.github/workflows/buildimages-update.yml @DataDog/agent-delivery @DataDog/agent-shared-components +/.github/workflows/gohai.yml @DataDog/agent-configuration +/.github/workflows/go-update-commenter.yml @DataDog/agent-runtimes +/.github/workflows/update_dependencies.yml @DataDog/agent-runtimes +/.github/workflows/buildimages-update.yml @DataDog/agent-delivery @DataDog/agent-runtimes /.github/workflows/collector-generate-and-update.yml @DataDog/opentelemetry /.run @DataDog/agent-devx-loops @@ -185,11 +185,12 @@ /chocolatey/ @DataDog/windows-agent -/cmd/ @DataDog/agent-shared-components +/cmd/ @DataDog/agent-configuration /cmd/trace-agent/ @DataDog/agent-apm /cmd/agent/subcommands/controlsvc @DataDog/windows-agent /cmd/agent/subcommands/dogstatsd* @DataDog/agent-metrics -/cmd/agent/subcommands/integrations @DataDog/agent-integrations @DataDog/agent-shared-components +/cmd/agent/subcommands/integrations @DataDog/agent-integrations @DataDog/agent-runtimes +/cmd/agent/subcommands/dogstatsd* @DataDog/agent-metrics-logs /cmd/agent/subcommands/remoteconfig @Datadog/remote-config /cmd/agent/subcommands/snmp @DataDog/ndm-core /cmd/agent/subcommands/streamlogs @DataDog/agent-logs @@ -257,10 +258,10 @@ /docs/dev/checks/ @DataDog/agent-metrics /docs/cloud-workload-security/ @DataDog/documentation @DataDog/agent-security -/docs/public/components/ @DataDog/agent-shared-components -/docs/public/hostname/ @DataDog/agent-shared-components +/docs/public/components/ @DataDog/agent-runtimes +/docs/public/hostname/ @DataDog/agent-runtimes /docs/public/architecture/dogstatsd/ @DataDog/agent-metrics -/docs/public/guidelines/deprecated-components-documentation/ @DataDog/agent-shared-components +/docs/public/guidelines/deprecated-components-documentation/ @DataDog/agent-runtimes /google-marketplace/ @DataDog/container-ecosystems @@ -274,7 
+275,7 @@ /Makefile.trace @DataDog/agent-apm /omnibus/ @DataDog/agent-delivery -/omnibus/python-scripts/ @DataDog/agent-shared-components @DataDog/windows-agent +/omnibus/python-scripts/ @DataDog/agent-runtimes @DataDog/windows-agent /omnibus/config/patches/openscap/ @DataDog/agent-cspm /omnibus/config/software/datadog-agent-integrations-*.rb @DataDog/agent-integrations /omnibus/config/software/datadog-security-agent*.rb @Datadog/agent-security @DataDog/agent-delivery @@ -285,17 +286,17 @@ # The following is managed by `inv lint-components` -- DO NOT EDIT # BEGIN COMPONENTS -/comp @DataDog/agent-shared-components -/comp/agent @DataDog/agent-shared-components +/comp @DataDog/agent-runtimes +/comp/agent @DataDog/agent-runtimes /comp/aggregator @DataDog/agent-metrics -/comp/api @DataDog/agent-shared-components +/comp/api @DataDog/agent-runtimes /comp/checks @DataDog/agent-metrics /comp/collector @DataDog/agent-metrics -/comp/core @DataDog/agent-shared-components +/comp/core @DataDog/agent-runtimes /comp/dogstatsd @DataDog/agent-metrics /comp/forwarder @DataDog/agent-metrics /comp/logs @DataDog/agent-logs -/comp/metadata @DataDog/agent-shared-components +/comp/metadata @DataDog/agent-configuration /comp/ndmtmp @DataDog/ndm-core /comp/netflow @DataDog/ndm-integrations /comp/networkpath @DataDog/Networks @DataDog/network-device-monitoring @@ -308,11 +309,18 @@ /comp/updater @DataDog/fleet @DataDog/windows-agent /comp/agent/cloudfoundrycontainer @DataDog/agent-integrations /comp/agent/jmxlogger @DataDog/agent-metrics -/comp/aggregator/diagnosesendermanager @DataDog/agent-shared-components +/comp/aggregator/diagnosesendermanager @DataDog/agent-configuration /comp/checks/agentcrashdetect @DataDog/windows-kernel-integrations /comp/checks/windowseventlog @DataDog/windows-agent /comp/checks/winregistry @DataDog/windows-agent /comp/core/autodiscovery @DataDog/container-platform +/comp/core/config @DataDog/agent-configuration +/comp/core/configsync @DataDog/agent-configuration 
+/comp/core/flare @DataDog/agent-configuration +/comp/core/gui @DataDog/agent-configuration +/comp/core/secrets @DataDog/agent-configuration +/comp/core/settings @DataDog/agent-configuration +/comp/core/status @DataDog/agent-configuration /comp/core/sysprobeconfig @DataDog/ebpf-platform /comp/core/tagger @DataDog/container-platform /comp/core/workloadmeta @DataDog/container-platform @@ -332,23 +340,23 @@ # END COMPONENTS # Additional notification to @iglendd about Agent Telemetry changes for optional approval and governance acknowledgement -/comp/core/agenttelemetry @DataDog/agent-shared-components @iglendd +/comp/core/agenttelemetry @DataDog/agent-runtimes @iglendd # trace-agent logging implementation should also notify agent-apm /comp/core/log/impl-trace @DataDog/agent-apm # pkg -/pkg/ @DataDog/agent-shared-components -/pkg/api/ @DataDog/agent-shared-components +/pkg/ @DataDog/agent-runtimes +/pkg/api/ @DataDog/agent-runtimes /pkg/aggregator/ @DataDog/agent-metrics /pkg/collector/ @DataDog/agent-metrics /pkg/commonchecks/ @DataDog/agent-metrics -/pkg/cli/ @DataDog/agent-shared-components +/pkg/cli/ @DataDog/agent-configuration /pkg/cli/subcommands/clusterchecks @DataDog/container-platform /pkg/discovery/ @DataDog/agent-discovery -/pkg/errors/ @DataDog/agent-shared-components -/pkg/fips @DataDog/agent-shared-components -/pkg/gohai @DataDog/agent-shared-components +/pkg/errors/ @DataDog/agent-runtimes +/pkg/fips @DataDog/agent-runtimes +/pkg/gohai @DataDog/agent-configuration /pkg/gpu/ @DataDog/ebpf-platform /pkg/jmxfetch/ @DataDog/agent-metrics /pkg/metrics/ @DataDog/agent-metrics @@ -357,11 +365,11 @@ /pkg/serializer/internal/metrics/origin_mapping.go @DataDog/agent-metrics @DataDog/agent-integrations /pkg/serverless/ @DataDog/serverless @Datadog/serverless-aws /pkg/serverless/appsec/ @DataDog/asm-go -/pkg/status/ @DataDog/agent-shared-components -/pkg/telemetry/ @DataDog/agent-shared-components +/pkg/status/ @DataDog/agent-configuration +/pkg/telemetry/ 
@DataDog/agent-runtimes /pkg/telemetry/stat_gauge_wrapper.go @DataDog/ebpf-platform /pkg/telemetry/stat_counter_wrapper.go @DataDog/ebpf-platform -/pkg/version/ @DataDog/agent-shared-components +/pkg/version/ @DataDog/agent-runtimes /pkg/obfuscate/ @DataDog/agent-apm /pkg/trace/ @DataDog/agent-apm /pkg/trace/api/otlp*.go @DataDog/opentelemetry @@ -401,57 +409,57 @@ /pkg/collector/corechecks/gpu/ @DataDog/ebpf-platform /pkg/collector/corechecks/network-devices/ @DataDog/ndm-integrations /pkg/collector/corechecks/orchestrator/ @DataDog/container-app -/pkg/collector/corechecks/net/ @DataDog/agent-shared-components +/pkg/collector/corechecks/net/ @DataDog/agent-runtimes /pkg/collector/corechecks/oracle @DataDog/database-monitoring /pkg/collector/corechecks/sbom/ @DataDog/container-integrations /pkg/collector/corechecks/servicediscovery/ @DataDog/agent-discovery /pkg/collector/corechecks/snmp/ @DataDog/ndm-core -/pkg/collector/corechecks/system/ @DataDog/agent-shared-components -/pkg/collector/corechecks/system/**/*_windows*.go @DataDog/agent-shared-components @DataDog/windows-agent +/pkg/collector/corechecks/system/ @DataDog/agent-runtimes +/pkg/collector/corechecks/system/**/*_windows*.go @DataDog/agent-runtimes @DataDog/windows-agent /pkg/collector/corechecks/system/wincrashdetect/ @DataDog/windows-kernel-integrations /pkg/collector/corechecks/system/winkmem/ @DataDog/windows-agent /pkg/collector/corechecks/system/winproc/ @DataDog/windows-agent /pkg/collector/corechecks/systemd/ @DataDog/agent-integrations -/pkg/collector/corechecks/nvidia/ @DataDog/agent-shared-components -/pkg/config/ @DataDog/agent-shared-components -/pkg/config/config_template.yaml @DataDog/agent-shared-components @DataDog/documentation -/pkg/config/setup/apm.go @DataDog/agent-apm -/pkg/config/autodiscovery/ @DataDog/container-integrations @DataDog/container-platform -/pkg/config/env @DataDog/container-integrations @DataDog/container-platform -/pkg/config/setup @DataDog/agent-shared-components 
-/pkg/config/setup/process*.go @DataDog/container-intake -/pkg/config/setup/system_probe.go @DataDog/ebpf-platform -/pkg/config/setup/system_probe_cws.go @DataDog/agent-security -/pkg/config/setup/system_probe_cws_notwin.go @DataDog/agent-security -/pkg/config/setup/system_probe_cws_windows.go @DataDog/windows-kernel-integrations -/pkg/config/setup/security_agent.go @DataDog/agent-security +/pkg/collector/corechecks/nvidia/ @DataDog/agent-runtimes +/pkg/config/ @DataDog/agent-configuration +/pkg/config/config_template.yaml @DataDog/agent-configuration @DataDog/documentation +/pkg/config/setup/apm.go @DataDog/agent-apm @DataDog/agent-configuration +/pkg/config/autodiscovery/ @DataDog/container-integrations @DataDog/container-platform @DataDog/agent-configuration +/pkg/config/env @DataDog/container-integrations @DataDog/container-platform @DataDog/agent-configuration +/pkg/config/setup @DataDog/agent-configuration +/pkg/config/setup/process*.go @DataDog/container-intake @DataDog/agent-configuration +/pkg/config/setup/system_probe.go @DataDog/ebpf-platform @DataDog/agent-configuration +/pkg/config/setup/system_probe_cws.go @DataDog/agent-security @DataDog/agent-configuration +/pkg/config/setup/system_probe_cws_notwin.go @DataDog/agent-security @DataDog/agent-configuration +/pkg/config/setup/system_probe_cws_windows.go @DataDog/windows-kernel-integrations @DataDog/agent-configuration +/pkg/config/setup/security_agent.go @DataDog/agent-security @DataDog/agent-configuration /pkg/config/remote/ @DataDog/remote-config /pkg/config/remote/meta/ @DataDog/remote-config @DataDog/software-integrity-and-trust @DataDog/sdlc-security /pkg/containerlifecycle/ @Datadog/container-integrations /pkg/diagnose/ @Datadog/container-platform -/pkg/diagnose/connectivity/ @DataDog/agent-shared-components -/pkg/diagnose/ports/ @DataDog/agent-shared-components +/pkg/diagnose/connectivity/ @DataDog/agent-configuration +/pkg/diagnose/ports/ @DataDog/agent-configuration 
/pkg/diagnose/ports/*windows*.go @DataDog/windows-agent /pkg/eventmonitor/ @DataDog/ebpf-platform @DataDog/agent-security /pkg/dynamicinstrumentation/ @DataDog/debugger -/pkg/flare/ @DataDog/agent-shared-components -/pkg/flare/clusteragent/manifests.go @DataDog/container-ecosystems -/pkg/flare/*_win.go @Datadog/windows-agent -/pkg/flare/*_windows.go @Datadog/windows-agent -/pkg/flare/*_windows_test.go @Datadog/windows-agent +/pkg/flare/ @DataDog/agent-configuration +/pkg/flare/clusteragent/manifests.go @DataDog/container-ecosystems @DataDog/agent-configuration +/pkg/flare/*_win.go @Datadog/windows-agent @DataDog/agent-configuration +/pkg/flare/*_windows.go @Datadog/windows-agent @DataDog/agent-configuration +/pkg/flare/*_windows_test.go @Datadog/windows-agent @DataDog/agent-configuration /pkg/fleet/ @DataDog/fleet @DataDog/windows-agent /pkg/fleet/installer/setup/djm/ @DataDog/fleet @DataDog/data-jobs-monitoring -/pkg/pidfile/ @DataDog/agent-shared-components +/pkg/pidfile/ @DataDog/agent-runtimes /pkg/persistentcache/ @DataDog/agent-metrics -/pkg/proto/ @DataDog/agent-shared-components +/pkg/proto/ @DataDog/agent-runtimes /pkg/proto/datadog/languagedetection @DataDog/container-intake /pkg/proto/datadog/process @DataDog/container-intake /pkg/proto/datadog/trace @DataDog/agent-apm /pkg/proto/datadog/workloadmeta @DataDog/container-platform /pkg/remoteconfig/ @DataDog/remote-config -/pkg/runtime/ @DataDog/agent-shared-components -/pkg/tagset/ @DataDog/agent-shared-components -/pkg/util/ @DataDog/agent-shared-components +/pkg/runtime/ @DataDog/agent-runtimes +/pkg/tagset/ @DataDog/agent-runtimes +/pkg/util/ @DataDog/agent-runtimes /pkg/util/aggregatingqueue @DataDog/container-integrations @DataDog/container-platform /pkg/util/cloudproviders/cloudfoundry/ @DataDog/agent-integrations /pkg/util/clusteragent/ @DataDog/container-platform @@ -469,12 +477,12 @@ /pkg/util/ktime @DataDog/agent-security /pkg/util/kubernetes/ @DataDog/container-integrations 
@DataDog/container-platform @DataDog/container-app /pkg/util/podman/ @DataDog/container-integrations -/pkg/util/port/ @DataDog/agent-shared-components +/pkg/util/port/ @DataDog/agent-runtimes /pkg/util/port/portlist/*windows*.go @DataDog/windows-agent /pkg/util/prometheus @DataDog/container-integrations /pkg/util/tags/ @DataDog/container-platform /pkg/util/trivy/ @DataDog/container-integrations @DataDog/agent-security -/pkg/util/uuid/ @DataDog/agent-shared-components +/pkg/util/uuid/ @DataDog/agent-runtimes /pkg/util/cgroups/ @DataDog/container-integrations /pkg/util/retry/ @DataDog/container-platform /pkg/util/intern/ @DataDog/ebpf-platform @@ -487,7 +495,7 @@ /pkg/util/trie @DataDog/container-integrations /pkg/languagedetection @DataDog/container-intake @DataDog/universal-service-monitoring @DataDog/agent-discovery /pkg/linters/ @DataDog/agent-devx-loops -/pkg/linters/components/ @DataDog/agent-shared-components +/pkg/linters/components/ @DataDog/agent-runtimes /pkg/logs/ @DataDog/agent-logs /pkg/logs/launchers/windowsevent @DataDog/agent-logs @DataDog/windows-agent /pkg/logs/tailers/windowsevent @DataDog/agent-logs @DataDog/windows-agent @@ -511,11 +519,11 @@ /pkg/proto/pbgo/trace @DataDog/agent-apm /pkg/proto/pbgo/languagedetection @DataDog/agent-apm /pkg/proto/pbgo/process @DataDog/container-intake -/pkg/proto/pbgo/core @DataDog/agent-shared-components +/pkg/proto/pbgo/core @DataDog/agent-runtimes /pkg/proto/pbgo/core/remoteconfig.pb.go @DataDog/remote-config /pkg/proto/pbgo/core/remoteconfig_gen.go @DataDog/remote-config /pkg/proto/pbgo/core/remoteconfig_gen_test.go @DataDog/remote-config -/pkg/proto/pbgo/mocks/core @DataDog/agent-shared-components +/pkg/proto/pbgo/mocks/core @DataDog/agent-runtimes /pkg/orchestrator/ @DataDog/container-app /pkg/network/ @DataDog/Networks /pkg/network/*usm* @DataDog/universal-service-monitoring @@ -565,11 +573,11 @@ /tasks/ @DataDog/agent-devx-loops @DataDog/agent-devx-infra /tasks/msi.py @DataDog/windows-agent 
-/tasks/agent.py @DataDog/agent-shared-components -/tasks/go_deps.py @DataDog/agent-shared-components +/tasks/agent.py @DataDog/agent-runtimes +/tasks/go_deps.py @DataDog/agent-runtimes /tasks/dogstatsd.py @DataDog/agent-metrics -/tasks/update_go.py @DataDog/agent-shared-components -/tasks/unit_tests/update_go_tests.py @DataDog/agent-shared-components +/tasks/update_go.py @DataDog/agent-runtimes +/tasks/unit_tests/update_go_tests.py @DataDog/agent-runtimes /tasks/cluster_agent_cloudfoundry.py @DataDog/agent-integrations /tasks/new_e2e_tests.py @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /tasks/process_agent.py @DataDog/container-intake @@ -587,15 +595,15 @@ /tasks/winbuild.py @DataDog/windows-agent /tasks/windows_resources.py @DataDog/windows-agent /tasks/collector.py @DataDog/opentelemetry -/tasks/components.py @DataDog/agent-shared-components -/tasks/components_templates @DataDog/agent-shared-components +/tasks/components.py @DataDog/agent-runtimes +/tasks/components_templates @DataDog/agent-runtimes /tasks/libs/ciproviders/ @DataDog/agent-devx-infra /tasks/libs/common/omnibus.py @DataDog/agent-delivery /tasks/omnibus.py @DataDog/agent-delivery /tasks/release.py @DataDog/agent-delivery -/tasks/unit_tests/components_tests.py @DataDog/agent-shared-components +/tasks/unit_tests/components_tests.py @DataDog/agent-runtimes /tasks/unit_tests/omnibus_tests.py @DataDog/agent-delivery -/tasks/unit_tests/testdata/components_src/ @DataDog/agent-shared-components +/tasks/unit_tests/testdata/components_src/ @DataDog/agent-runtimes /tasks/installer.py @DataDog/fleet /test/ @DataDog/agent-devx-loops /test/benchmarks/ @DataDog/agent-metrics @@ -614,11 +622,12 @@ /test/new-e2e/system-probe @DataDog/ebpf-platform /test/new-e2e/scenarios/system-probe @DataDog/ebpf-platform /test/new-e2e/tests/agent-platform @DataDog/container-ecosystems @DataDog/agent-delivery @DataDog/agent-devx-loops -/test/new-e2e/tests/agent-shared-components @DataDog/agent-shared-components 
-/test/new-e2e/tests/agent-subcommands @DataDog/agent-shared-components +/test/new-e2e/tests/agent-runtimes @DataDog/agent-runtimes +/test/new-e2e/tests/agent-configuration @DataDog/agent-configuration +/test/new-e2e/tests/agent-subcommands @DataDog/agent-configuration /test/new-e2e/tests/containers @DataDog/container-integrations @DataDog/container-platform /test/new-e2e/tests/discovery @DataDog/agent-discovery -/test/new-e2e/tests/fips-compliance @DataDog/agent-shared-components +/test/new-e2e/tests/fips-compliance @DataDog/agent-runtimes /test/new-e2e/tests/ha-agent @DataDog/ndm-core /test/new-e2e/tests/language-detection @DataDog/container-intake /test/new-e2e/tests/ndm @DataDog/ndm-core @@ -640,7 +649,7 @@ /test/new-e2e/tests/installer/script @DataDog/fleet @DataDog/data-jobs-monitoring /test/new-e2e/tests/gpu @Datadog/ebpf-platform /test/otel/ @DataDog/opentelemetry -/test/system/ @DataDog/agent-shared-components +/test/system/ @DataDog/agent-runtimes /test/system/dogstatsd/ @DataDog/agent-metrics /test/benchmarks/apm_scripts/ @DataDog/agent-apm /test/regression/ @DataDog/single-machine-performance @@ -648,8 +657,8 @@ /tools/ @DataDog/agent-devx-loops /tools/ci @DataDog/agent-devx-infra /tools/ebpf/ @DataDog/ebpf-platform -/tools/gdb/ @DataDog/agent-shared-components -/tools/go-update/ @DataDog/agent-shared-components +/tools/gdb/ @DataDog/agent-runtimes +/tools/go-update/ @DataDog/agent-runtimes /tools/NamedPipeCmd/ @DataDog/windows-kernel-integrations /tools/retry_file_dump/ @DataDog/agent-metrics /tools/windows/ @DataDog/windows-agent @@ -671,5 +680,5 @@ /internal/tools/**/go.mod @DataDog/agent-devx-loops /internal/tools/**/go.sum @DataDog/agent-devx-loops -/pkg/util/scrubber/go.mod @DataDog/agent-shared-components -/pkg/util/scrubber/go.sum @DataDog/agent-shared-components +/pkg/util/scrubber/go.mod @DataDog/agent-runtimes +/pkg/util/scrubber/go.sum @DataDog/agent-runtimes diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 
f8f32aa0984434..0c58dbf1333433 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -64,7 +64,7 @@ updates: labels: - dependencies - dependencies-go - - team/agent-shared-components + - team/agent-configuration - changelog/no-changelog ignore: # Ignore internal modules diff --git a/.github/workflows/buildimages-update.yml b/.github/workflows/buildimages-update.yml index 419ec2016efcac..e5d19b036dc841 100644 --- a/.github/workflows/buildimages-update.yml +++ b/.github/workflows/buildimages-update.yml @@ -141,7 +141,7 @@ jobs: TMP_PR_BODY_PATH: /tmp/pr_body GH_TOKEN: ${{ github.token }} PR_TITLE: "[automated] ${{ steps.update_build_images.outputs.MESSAGE }}" - PR_LABELS: "go-update,team/agent-shared-components" + PR_LABELS: "go-update,team/agent-runtimes" CURRENT_BUILDIMAGE_TAG: ${{ steps.current_buildimage_tag.outputs.BUILDIMAGE_TAG }} IMAGES_ID: ${{ inputs.images_id }} CURRENT_GO_VERSION: ${{ steps.current_go_version.outputs.GO_VERSION }} diff --git a/.github/workflows/update_dependencies.yml b/.github/workflows/update_dependencies.yml index a5f0e66454af40..c45a9708af71ed 100644 --- a/.github/workflows/update_dependencies.yml +++ b/.github/workflows/update_dependencies.yml @@ -60,7 +60,7 @@ jobs: TMP_PR_BODY_PATH: /tmp/pr_body GH_TOKEN: ${{ steps.app-token.outputs.token }} PR_TITLE: "[automated] Update golang.org/x/... 
dependencies" - PR_LABELS: "team/agent-shared-components,qa/done,changelog/no-changelog" + PR_LABELS: "team/agent-runtimes,qa/done,changelog/no-changelog" GITHUB_REF: ${{ github.ref }} run: | # Generate the PR description diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 37bd03600c27d9..6f306bc1990bbb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -821,14 +821,24 @@ workflow: - test/new-e2e/tests/remote-config/**/* compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 -.on_asc_or_e2e_changes: +.on_arun_or_e2e_changes: - !reference [.on_e2e_main_release_or_rc] - changes: paths: - cmd/**/* - pkg/**/* - comp/**/* - - test/new-e2e/tests/agent-shared-components/**/* + - test/new-e2e/tests/agent-runtimes/**/* + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + +.on_acfg_or_e2e_changes: + - !reference [.on_e2e_main_release_or_rc] + - changes: + paths: + - cmd/**/* + - pkg/**/* + - comp/**/* + - test/new-e2e/tests/agent-configuration//**/* compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 .on_subcommands_or_e2e_changes: diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS index 4db304d41900fa..f0caeeddd203ce 100644 --- a/.gitlab/JOBOWNERS +++ b/.gitlab/JOBOWNERS @@ -27,7 +27,7 @@ prepare_secagent_ebpf_functional_tests* @DataDog/agent-security protobuf_test @DataDog/multiple # Send count metrics about Golang dependencies -golang_deps_send_count_metrics @DataDog/agent-shared-components +golang_deps_send_count_metrics @DataDog/agent-runtimes # Golang dependency diff generation golang_deps_diff @DataDog/ebpf-platform golang_deps_commenter @DataDog/ebpf-platform diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 7ff524d9bd268b..5ebc7fd5906b58 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -166,14 +166,23 @@ new-e2e-remote-config: TARGETS: 
./tests/remote-config TEAM: remote-config -new-e2e-agent-shared-components: +new-e2e-agent-configuration: extends: .new_e2e_template_needs_deb_windows_x64 rules: - - !reference [.on_asc_or_e2e_changes] + - !reference [.on_acfg_or_e2e_changes] - !reference [.manual] variables: - TARGETS: ./tests/agent-shared-components - TEAM: agent-shared-components + TARGETS: ./tests/agent-configuration + TEAM: agent-configuration + +new-e2e-agent-runtimes: + extends: .new_e2e_template_needs_deb_windows_x64 + rules: + - !reference [.on_arun_or_e2e_changes] + - !reference [.manual] + variables: + TARGETS: ./tests/agent-runtimes + TEAM: agent-runtimes new-e2e-agent-subcommands: extends: .new_e2e_template_needs_deb_windows_x64 @@ -182,7 +191,7 @@ new-e2e-agent-subcommands: - !reference [.manual] variables: TARGETS: ./tests/agent-subcommands - TEAM: agent-shared-components + TEAM: agent-configuration parallel: matrix: - EXTRA_PARAMS: --run "Test(Linux|Windows)StatusSuite" @@ -203,11 +212,11 @@ new-e2e-fips-compliance-test: - qa_agent_fips - deploy_deb_testing-a7_x64 rules: - - !reference [.on_asc_or_e2e_changes] + - !reference [.on_arun_or_e2e_changes] - !reference [.manual] variables: TARGETS: ./tests/fips-compliance - TEAM: agent-shared-components + TEAM: agent-runtimes new-e2e-windows-service-test: extends: .new_e2e_template diff --git a/comp/README.md b/comp/README.md index 045e8ee9bdd9b9..22c8aee34ab2fa 100644 --- a/comp/README.md +++ b/comp/README.md @@ -6,7 +6,7 @@ Click the links for more documentation. 
## [comp/agent](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/agent) (Component Bundle) -*Datadog Team*: agent-shared-components +*Datadog Team*: agent-runtimes Package agent implements the "agent" bundle, @@ -46,13 +46,13 @@ Package demultiplexerendpoint component provides the /dogstatsd-contexts-dump AP ### [comp/aggregator/diagnosesendermanager](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/aggregator/diagnosesendermanager) -*Datadog Team*: agent-shared-components +*Datadog Team*: agent-configuration Package diagnosesendermanager defines the sender manager for the local diagnose check ## [comp/api](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/api) (Component Bundle) -*Datadog Team*: agent-shared-components +*Datadog Team*: agent-runtimes Package api implements the "api" bundle, @@ -102,7 +102,7 @@ Package collector defines the collector component. ## [comp/core](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core) (Component Bundle) -*Datadog Team*: agent-shared-components +*Datadog Team*: agent-runtimes Package core implements the "core" bundle, providing services common to all agent flavors and binaries. @@ -119,19 +119,27 @@ Package autodiscovery provides the autodiscovery component for the Datadog Agent ### [comp/core/config](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/config) +*Datadog Team*: agent-configuration + Package config implements a component to handle agent configuration. This component temporarily wraps pkg/config. ### [comp/core/configsync](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/configsync) +*Datadog Team*: agent-configuration + Package configsync implements synchronizing the configuration using the core agent config API ### [comp/core/flare](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/flare) +*Datadog Team*: agent-configuration + Package flare implements a component to generate flares from the agent. 
### [comp/core/gui](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/gui) +*Datadog Team*: agent-configuration + Package gui provides the GUI server component for the Datadog Agent. ### [comp/core/healthprobe](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/healthprobe) @@ -166,14 +174,20 @@ status and emit flare data ### [comp/core/secrets](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/secrets) +*Datadog Team*: agent-configuration + Package secrets decodes secret values by invoking the configured executable command ### [comp/core/settings](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/settings) +*Datadog Team*: agent-configuration + Package settings defines the interface for the component that manage settings that can be changed at runtime ### [comp/core/status](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/status) +*Datadog Team*: agent-configuration + Package status displays information about the agent. ### [comp/core/sysprobeconfig](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/sysprobeconfig) @@ -282,7 +296,7 @@ send logs. ## [comp/metadata](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/metadata) (Component Bundle) -*Datadog Team*: agent-shared-components +*Datadog Team*: agent-configuration Package metadata implements the "metadata" bundle, providing services and support for all the metadata payload sent by the Agent. diff --git a/comp/agent/autoexit/component.go b/comp/agent/autoexit/component.go index 1a5210767511ee..a0bee2359e4fae 100644 --- a/comp/agent/autoexit/component.go +++ b/comp/agent/autoexit/component.go @@ -6,7 +6,7 @@ // Package autoexit lets setup automatic shutdown mechanism if necessary package autoexit -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. 
type Component interface { diff --git a/comp/agent/bundle.go b/comp/agent/bundle.go index 5e550c0e1ee080..bacff1ab86cabd 100644 --- a/comp/agent/bundle.go +++ b/comp/agent/bundle.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-shared-components +// team: agent-runtimes // Bundle defines the fx options for this bundle. func Bundle(params jmxloggerimpl.Params) fxutil.BundleOptions { diff --git a/comp/agent/expvarserver/component.go b/comp/agent/expvarserver/component.go index ec329b5aed26e9..97fa50e8bb150f 100644 --- a/comp/agent/expvarserver/component.go +++ b/comp/agent/expvarserver/component.go @@ -6,7 +6,7 @@ // Package expvarserver contains the component type for the expVar server. package expvarserver -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component interface{} diff --git a/comp/aggregator/diagnosesendermanager/component.go b/comp/aggregator/diagnosesendermanager/component.go index e5f68ce9607196..f58e93c6a716e7 100644 --- a/comp/aggregator/diagnosesendermanager/component.go +++ b/comp/aggregator/diagnosesendermanager/component.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" ) -// team: agent-shared-components +// team: agent-configuration // Component is the component type. 
// This component must not be used with demultiplexer.Component diff --git a/comp/api/api/def/component.go b/comp/api/api/def/component.go index 02c3aae88a2310..2447217a4eb403 100644 --- a/comp/api/api/def/component.go +++ b/comp/api/api/def/component.go @@ -13,7 +13,7 @@ import ( "go.uber.org/fx" ) -// team: agent-shared-components +// team: agent-runtimes // TODO(components): // * Lifecycle can't be used atm because: diff --git a/comp/api/authtoken/component.go b/comp/api/authtoken/component.go index f0247bd7a679e4..c1d59e5658c985 100644 --- a/comp/api/authtoken/component.go +++ b/comp/api/authtoken/component.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/option" ) -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component interface { diff --git a/comp/api/bundle.go b/comp/api/bundle.go index b352de03eb1987..3d562fe33f8e69 100644 --- a/comp/api/bundle.go +++ b/comp/api/bundle.go @@ -8,7 +8,7 @@ package api import "github.com/DataDog/datadog-agent/pkg/util/fxutil" -// team: agent-shared-components +// team: agent-runtimes // TODO(components): empty bundle for the linter. 
Might be removed later diff --git a/comp/core/agenttelemetry/def/component.go b/comp/core/agenttelemetry/def/component.go index 06563b2de91922..d0b01c7aa54d34 100644 --- a/comp/core/agenttelemetry/def/component.go +++ b/comp/core/agenttelemetry/def/component.go @@ -6,7 +6,7 @@ // Package agenttelemetry implements a component to generate Agent telemetry package agenttelemetry -// team: agent-shared-components +// team: agent-runtimes // Component is the component type type Component interface { diff --git a/comp/core/bundle.go b/comp/core/bundle.go index b75f5a8a76dfed..749537bbe861e4 100644 --- a/comp/core/bundle.go +++ b/comp/core/bundle.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/option" ) -// team: agent-shared-components +// team: agent-runtimes // Bundle defines the fx options for this bundle. func Bundle() fxutil.BundleOptions { diff --git a/comp/core/bundle_mock.go b/comp/core/bundle_mock.go index c16d5cce0714e1..089e645b3403da 100644 --- a/comp/core/bundle_mock.go +++ b/comp/core/bundle_mock.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-shared-components +// team: agent-runtimes // MakeMockBundle returns a core bundle with a customized set of fx.Option including sane defaults. func MakeMockBundle(logParams, logger fx.Option) fxutil.BundleOptions { diff --git a/comp/core/config/component.go b/comp/core/config/component.go index 9cc7e995755872..eebbe611d4e24c 100644 --- a/comp/core/config/component.go +++ b/comp/core/config/component.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-shared-components +// team: agent-configuration // Component is the component type. 
type Component interface { diff --git a/comp/core/configsync/component.go b/comp/core/configsync/component.go index 4d25920a56da3f..542a022d307ccf 100644 --- a/comp/core/configsync/component.go +++ b/comp/core/configsync/component.go @@ -6,7 +6,7 @@ // Package configsync implements synchronizing the configuration using the core agent config API package configsync -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface{} diff --git a/comp/core/flare/component.go b/comp/core/flare/component.go index 9921e8e32d7663..3f34814d4556e0 100644 --- a/comp/core/flare/component.go +++ b/comp/core/flare/component.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface { diff --git a/comp/core/gui/component.go b/comp/core/gui/component.go index 1069c5ed9a6ebe..7892089bf95e5f 100644 --- a/comp/core/gui/component.go +++ b/comp/core/gui/component.go @@ -6,7 +6,7 @@ // Package gui provides the GUI server component for the Datadog Agent. package gui -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface { diff --git a/comp/core/healthprobe/def/component.go b/comp/core/healthprobe/def/component.go index 028b515dcb2a76..893352cf35d716 100644 --- a/comp/core/healthprobe/def/component.go +++ b/comp/core/healthprobe/def/component.go @@ -6,7 +6,7 @@ // Package healthprobe implements the health check server package healthprobe -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. 
type Component interface { diff --git a/comp/core/hostname/component.go b/comp/core/hostname/component.go index 0a90964a3c0d6f..b01530f32bfa8e 100644 --- a/comp/core/hostname/component.go +++ b/comp/core/hostname/component.go @@ -8,7 +8,7 @@ package hostname import "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component = hostnameinterface.Component diff --git a/comp/core/hostname/hostnameinterface/component.go b/comp/core/hostname/hostnameinterface/component.go index afbc02b366748b..075670a9fdd2d1 100644 --- a/comp/core/hostname/hostnameinterface/component.go +++ b/comp/core/hostname/hostnameinterface/component.go @@ -10,7 +10,7 @@ import ( "context" ) -// team: agent-shared-components +// team: agent-runtimes // Data contains hostname and the hostname provider type Data struct { diff --git a/comp/core/log/def/component.go b/comp/core/log/def/component.go index 17cb1eab149042..b6f32798f990fe 100644 --- a/comp/core/log/def/component.go +++ b/comp/core/log/def/component.go @@ -14,7 +14,7 @@ // logging output to `t.Log(..)`, for ease of investigation when a test fails. package log -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component interface { diff --git a/comp/core/lsof/def/component.go b/comp/core/lsof/def/component.go index 2ff4766afa9f5f..46f628caf4dbea 100644 --- a/comp/core/lsof/def/component.go +++ b/comp/core/lsof/def/component.go @@ -6,7 +6,7 @@ // Package lsof provides a flare file with data about files opened by the agent process package lsof -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. 
type Component interface { diff --git a/comp/core/pid/component.go b/comp/core/pid/component.go index df4a4100f81d10..ad1a3b4ffd687b 100644 --- a/comp/core/pid/component.go +++ b/comp/core/pid/component.go @@ -7,7 +7,7 @@ // doesn't exist or doesn't contain a PID for a running process. package pid -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component interface{} diff --git a/comp/core/remoteagentregistry/def/component.go b/comp/core/remoteagentregistry/def/component.go index 71fe2ff4fa12c0..2b3e68926d5ccc 100644 --- a/comp/core/remoteagentregistry/def/component.go +++ b/comp/core/remoteagentregistry/def/component.go @@ -7,7 +7,7 @@ // status and emit flare data package remoteagentregistry -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component interface { diff --git a/comp/core/secrets/component.go b/comp/core/secrets/component.go index df8b6b7e0d090d..fa774e801b30f9 100644 --- a/comp/core/secrets/component.go +++ b/comp/core/secrets/component.go @@ -10,7 +10,7 @@ import ( "io" ) -// team: agent-shared-components +// team: agent-configuration // ConfigParams holds parameters for configuration type ConfigParams struct { diff --git a/comp/core/settings/component.go b/comp/core/settings/component.go index 948323cea3947b..3ed46c9e1cdf8c 100644 --- a/comp/core/settings/component.go +++ b/comp/core/settings/component.go @@ -15,7 +15,7 @@ import ( "go.uber.org/fx" ) -// team: agent-shared-components +// team: agent-configuration // SettingNotFoundError is used to warn about non existing/not registered runtime setting type SettingNotFoundError struct { diff --git a/comp/core/status/component.go b/comp/core/status/component.go index 9ea9e5d9fcde20..736c8ff2fa6601 100644 --- a/comp/core/status/component.go +++ b/comp/core/status/component.go @@ -12,7 +12,7 @@ import ( "go.uber.org/fx" ) -// team: agent-shared-components +// team: agent-configuration // 
CollectorSection stores the collector section name const CollectorSection string = "collector" diff --git a/comp/core/telemetry/component.go b/comp/core/telemetry/component.go index 652d3becd19ee5..6b27e7431c6a97 100644 --- a/comp/core/telemetry/component.go +++ b/comp/core/telemetry/component.go @@ -10,7 +10,7 @@ import ( "net/http" ) -// team: agent-shared-components +// team: agent-runtimes // Component is the component type. type Component interface { diff --git a/comp/metadata/bundle.go b/comp/metadata/bundle.go index 79b0dd0c571840..a68cd77f59a8eb 100644 --- a/comp/metadata/bundle.go +++ b/comp/metadata/bundle.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: agent-shared-components +// team: agent-configuration // Bundle defines the fx options for this bundle. func Bundle() fxutil.BundleOptions { diff --git a/comp/metadata/host/component.go b/comp/metadata/host/component.go index ffb748c35fd637..ab9f439bbd90ab 100644 --- a/comp/metadata/host/component.go +++ b/comp/metadata/host/component.go @@ -10,7 +10,7 @@ import ( "context" ) -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface { diff --git a/comp/metadata/host/component_mock.go b/comp/metadata/host/component_mock.go index 1a774bb5ab6499..585dbf87434e53 100644 --- a/comp/metadata/host/component_mock.go +++ b/comp/metadata/host/component_mock.go @@ -7,7 +7,7 @@ package host -// team: agent-shared-components +// team: agent-configuration // Mock is a wrapper of the component type. type Mock interface { diff --git a/comp/metadata/inventoryagent/component.go b/comp/metadata/inventoryagent/component.go index 2cbc41c3cfccb1..a71b0a017dca8f 100644 --- a/comp/metadata/inventoryagent/component.go +++ b/comp/metadata/inventoryagent/component.go @@ -6,7 +6,7 @@ // Package inventoryagent implements a component to generate the 'datadog_agent' metadata payload for inventory. 
package inventoryagent -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface { diff --git a/comp/metadata/inventorychecks/component.go b/comp/metadata/inventorychecks/component.go index bb3286a714be33..85f7399403af66 100644 --- a/comp/metadata/inventorychecks/component.go +++ b/comp/metadata/inventorychecks/component.go @@ -6,7 +6,7 @@ // Package inventorychecks implements a component to generate the 'check_metadata' metadata payload for inventory. package inventorychecks -// team: agent-shared-components +// team: agent-configuration // Component is the component type. // diff --git a/comp/metadata/inventoryhost/component.go b/comp/metadata/inventoryhost/component.go index 10927e8858e413..7a538ecf94ceb4 100644 --- a/comp/metadata/inventoryhost/component.go +++ b/comp/metadata/inventoryhost/component.go @@ -6,7 +6,7 @@ // Package inventoryhost exposes the interface for the component to generate the 'host_metadata' metadata payload for inventory. package inventoryhost -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface { diff --git a/comp/metadata/inventoryotel/component.go b/comp/metadata/inventoryotel/component.go index 58fd5ec440e10b..3996d16d8a3ca0 100644 --- a/comp/metadata/inventoryotel/component.go +++ b/comp/metadata/inventoryotel/component.go @@ -6,7 +6,7 @@ // Package inventoryotel implements a component to generate the 'datadog_agent' metadata payload for inventory. package inventoryotel -// team: agent-shared-components +// team: agent-configuration // Component is the component type. 
type Component interface { diff --git a/comp/metadata/resources/component.go b/comp/metadata/resources/component.go index 28219f3775fa6f..93f51d18c93452 100644 --- a/comp/metadata/resources/component.go +++ b/comp/metadata/resources/component.go @@ -6,7 +6,7 @@ // Package resources implements a component to generate the 'resources' metadata payload. package resources -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface { diff --git a/comp/metadata/runner/component.go b/comp/metadata/runner/component.go index 1f2e93fcb79130..b2e67e4a9651bb 100644 --- a/comp/metadata/runner/component.go +++ b/comp/metadata/runner/component.go @@ -6,7 +6,7 @@ // Package runner implements a component to generate metadata payload at the right interval. package runner -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface{} diff --git a/comp/metadata/securityagent/def/component.go b/comp/metadata/securityagent/def/component.go index 31553c1bc39ab4..c3c15fd3fe2fbb 100644 --- a/comp/metadata/securityagent/def/component.go +++ b/comp/metadata/securityagent/def/component.go @@ -6,7 +6,7 @@ // Package securityagent is the metadata provider for security-agent process package securityagent -// team: agent-shared-components +// team: agent-configuration // Component is the component type. type Component interface{} diff --git a/comp/metadata/systemprobe/def/component.go b/comp/metadata/systemprobe/def/component.go index 7dc0c5d0a4379e..e733c5a4b9db1c 100644 --- a/comp/metadata/systemprobe/def/component.go +++ b/comp/metadata/systemprobe/def/component.go @@ -6,7 +6,7 @@ // Package systemprobe is the metadata provider for system-probe process package systemprobe -// team: agent-shared-components +// team: agent-configuration // Component is the component type. 
type Component interface{} diff --git a/docs/public/components/creating-components.md b/docs/public/components/creating-components.md index ec000dfd93ee27..5a4b5f173a7d74 100644 --- a/docs/public/components/creating-components.md +++ b/docs/public/components/creating-components.md @@ -93,7 +93,7 @@ When defining a component interface, avoid using structs or interfaces from thir import "github.com/prometheus/client_golang/prometheus" - // team: agent-shared-components + // team: agent-runtimes // Component is the component type. type Component interface { diff --git a/tasks/components.py b/tasks/components.py index b644ef90d155c2..0f2281742b7147 100644 --- a/tasks/components.py +++ b/tasks/components.py @@ -473,7 +473,7 @@ def make_codeowners(codeowners_lines, bundles, components_without_bundle): # codeowners is parsed in a last-match-wins fashion, so put more-specific values (components) after # less-specific (bundles). We include only components with a team different from their bundle, to # keep the file short. 
- yield '/comp @DataDog/agent-shared-components' + yield '/comp @DataDog/agent-runtimes' different_components = [] for b in bundles: if b.team: diff --git a/tasks/libs/issue/assign.py b/tasks/libs/issue/assign.py index 5adad877e39868..4b2e8370abb3dd 100644 --- a/tasks/libs/issue/assign.py +++ b/tasks/libs/issue/assign.py @@ -87,7 +87,8 @@ def simple_match(word): "agent-cspm": ['cspm'], "ebpf-platform": ['ebpf', 'system-prob', 'sys-prob'], "agent-security": ['security', 'vuln', 'security-agent'], - "agent-shared-components": ['fips', 'inventory', 'payload', 'jmx', 'intak', 'gohai'], + "agent-runtimes": ['fips', 'payload', 'jmx', 'intake'], + "agent-configuration": ['inventory', 'gohai'], "fleet": ['fleet', 'fleet-automation'], "opentelemetry": ['otel', 'opentelemetry'], "windows-agent": ['windows', 'sys32', 'powershell'], diff --git a/tasks/libs/issue/model/constants.py b/tasks/libs/issue/model/constants.py index de9592a6e02dde..d71eb0ef3afea6 100644 --- a/tasks/libs/issue/model/constants.py +++ b/tasks/libs/issue/model/constants.py @@ -25,7 +25,8 @@ 'opentelemetry', 'universal-service-monitoring', 'agent-build-and-releases', - 'agent-shared-components', + 'agent-configuration', + 'agent-runtimes', 'agent-integrations', 'agent-metrics-logs', 'agent-metrics', diff --git a/tasks/libs/pipeline/github_jira_map.yaml b/tasks/libs/pipeline/github_jira_map.yaml index 58ec5fa29c9959..4e311b495aa7be 100644 --- a/tasks/libs/pipeline/github_jira_map.yaml +++ b/tasks/libs/pipeline/github_jira_map.yaml @@ -15,7 +15,8 @@ '@datadog/agent-metrics-logs': AMLII '@datadog/agent-metrics': AGTMETRICS '@datadog/agent-logs': AGNTLOG -'@datadog/agent-shared-components': ASCII +'@datadog/agent-runtimes': ARUN +'@datadog/agent-configuration': ACFG '@datadog/container-app': CAP '@datadog/metrics-aggregation': AGGR '@datadog/serverless': SVLS diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index 75dbeeab794e15..982d3523fc698e 100644 --- 
a/tasks/libs/pipeline/github_slack_map.yaml +++ b/tasks/libs/pipeline/github_slack_map.yaml @@ -18,7 +18,8 @@ '@datadog/agent-metrics': '#agent-metrics' '@datadog/agent-logs': '#agent-logs' '@datadog/agent-processing-and-routing': '#agent-processing-and-routing' -'@datadog/agent-shared-components': '#agent-shared-components-ops' +'@datadog/agent-runtimes': '#agent-runtimes-ops' +'@datadog/agent-configuration': '#agent-configuration-ops' '@datadog/container-app': '#container-app' '@datadog/metrics-aggregation': '#metrics-aggregation' '@datadog/serverless': '#serverless-agent' diff --git a/tasks/libs/pipeline/github_slack_review_map.yaml b/tasks/libs/pipeline/github_slack_review_map.yaml index c93a70bdd338e4..e0e37dde9e150c 100644 --- a/tasks/libs/pipeline/github_slack_review_map.yaml +++ b/tasks/libs/pipeline/github_slack_review_map.yaml @@ -19,7 +19,8 @@ '@datadog/agent-metrics': '#agent-metrics' '@datadog/agent-logs': '#agent-logs' '@datadog/agent-processing-and-routing': '#agent-processing-and-routing' -'@datadog/agent-shared-components': '#agent-shared-components' +'@datadog/agent-runtimes': '#agent-runtimes' +'@datadog/agent-configuration': '#agent-configuration' '@datadog/container-app': '#container-app' '@datadog/metrics-aggregation': '#metrics-aggregation' '@datadog/serverless': '#serverless-agent' diff --git a/tasks/linter.py b/tasks/linter.py index a0be3ca3970b95..01f77bd97915ed 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -654,7 +654,8 @@ def job_change_path(ctx, job_files=None): 'new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64', 'new-e2e-agent-platform-step-by-step-ubuntu-a7-arm64', 'new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64', - 'new-e2e-agent-shared-components', + 'new-e2e-agent-runtimes', + 'new-e2e-agent-configuration', 'new-e2e-cws', 'new-e2e-language-detection', 'new-e2e-npm-docker', diff --git a/tasks/unit_tests/components_tests.py b/tasks/unit_tests/components_tests.py index b6b29acc681643..a73937853921d1 100644 --- 
a/tasks/unit_tests/components_tests.py +++ b/tasks/unit_tests/components_tests.py @@ -35,9 +35,9 @@ def tearDown(self): components.components_classic_style.remove(classicComp) def test_find_team(self): - content = ['// my file', '// team: agent-shared-components', '// file starts here'] + content = ['// my file', '// team: agent-runtimes', '// file starts here'] teamname = components.find_team(content) - self.assertEqual(teamname, 'agent-shared-components') + self.assertEqual(teamname, 'agent-runtimes') def test_get_components_and_bundles(self): comps, bundles = components.get_components_and_bundles() @@ -83,7 +83,7 @@ def test_validate_bundles(self): self.assertEqual(0, len(errs)) # Lint error because team owner is missing - remove_line(filename, '// team: agent-shared-components') + remove_line(filename, '// team: agent-runtimes') _, bundles = components.get_components_and_bundles() errs = components.validate_bundles(bundles) @@ -97,7 +97,7 @@ def test_validate_component_definition(self): # Lint error because team owner is missing filename = os.path.join(comps[3].path, 'def/component.go') - remove_line(filename, '// team: agent-shared-components') + remove_line(filename, '// team: agent-runtimes') comps, _ = components.get_components_and_bundles() errs = components.validate_components(comps) diff --git a/tasks/unit_tests/issue_tests.py b/tasks/unit_tests/issue_tests.py index e72676fc50e3fb..2185deed64f813 100644 --- a/tasks/unit_tests/issue_tests.py +++ b/tasks/unit_tests/issue_tests.py @@ -29,7 +29,7 @@ def test_from_simple_match(self): def test_with_a_file(self): issue = MagicMock(title="fix bug", body="It comes from the file pkg/agent/build.py") - self.assertEqual(guess_from_keywords(issue), "agent-shared-components") + self.assertEqual(guess_from_keywords(issue), "agent-runtimes") def test_no_match(self): issue = MagicMock(title="fix bug", body="It comes from the file... 
hm I don't know.") diff --git a/tasks/unit_tests/junit_tests.py b/tasks/unit_tests/junit_tests.py index 8f3f8b943bec05..d5839e3f4945c6 100644 --- a/tasks/unit_tests/junit_tests.py +++ b/tasks/unit_tests/junit_tests.py @@ -55,7 +55,7 @@ def test_without_split(self): def test_with_split(self): xml_file = Path("./tasks/unit_tests/testdata/secret.tar.gz/-go-src-datadog-agent-junit-out-base.xml") owners = read_owners(".github/CODEOWNERS") - self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 28) + self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 30) class TestGroupPerTag(unittest.TestCase): @@ -121,4 +121,4 @@ def test_e2e(self, mock_which, mock_popen): mock_which.side_effect = lambda cmd: f"/usr/local/bin/{cmd}" junit.junit_upload_from_tgz("tasks/unit_tests/testdata/testjunit-tests_deb-x64-py3.tgz") mock_popen.assert_called() - self.assertEqual(mock_popen.call_count, 30) + self.assertEqual(mock_popen.call_count, 32) diff --git a/tasks/unit_tests/release_tests.py b/tasks/unit_tests/release_tests.py index 5361cc9cb7625b..41f844a7565f92 100644 --- a/tasks/unit_tests/release_tests.py +++ b/tasks/unit_tests/release_tests.py @@ -547,11 +547,11 @@ def test_create_build_links_patterns_correct_values(self): class TestParseTable(unittest.TestCase): - html = "

Summary

Status

QAPurple

Release date

TBD

Release notes

https://github.com/DataDog/datadog-agent/releases/tag/7.55.0

Code freeze date

Release coordinator

Release managers

agent-metrics-logs

agent-shared-components

agent-processing-and-routing

processes

network-device-monitoring

container-app

container-integrations

container-platform

agent-security (CWS)

agent-security (CSPM)

agent-build-and-releases

agent-ci-experience

agent-developer-tools

agent-integrations

network-performance-monitoring

platform-integrations

apm

database-monitoring

remote-config/fleet-automation

windows-agent

opentelemetry

ebpf-platform

universal-service-monitoring

windows-kernel-integrations

apm-onboarding

Major changes

 CVE for otel

 

 

 

 

" + html = "

Summary

Status

QAPurple

Release date

TBD

Release notes

https://github.com/DataDog/datadog-agent/releases/tag/7.55.0

Code freeze date

Release coordinator

Release managers

agent-metrics-logs

agent-runtimes

agent-processing-and-routing

processes

network-device-monitoring

container-app

container-integrations

container-platform

agent-security (CWS)

agent-security (CSPM)

agent-build-and-releases

agent-ci-experience

agent-developer-tools

agent-integrations

network-performance-monitoring

platform-integrations

apm

database-monitoring

remote-config/fleet-automation

windows-agent

opentelemetry

ebpf-platform

universal-service-monitoring

windows-kernel-integrations

apm-onboarding

Major changes

 CVE for otel

 

 

 

 

" def test_find_missing_rm(self): missing = list(parse_table(self.html, missing=True)) - self.assertListEqual(['agent-shared-components', 'container-integrations'], missing) + self.assertListEqual(['agent-runtimes', 'container-integrations'], missing) def test_find_rm(self): user = list(parse_table(self.html, missing=False, teams=['agent-integrations'])) diff --git a/tasks/unit_tests/testdata/components_src/comp/classic/component.go b/tasks/unit_tests/testdata/components_src/comp/classic/component.go index 85eef7818cddff..6ca8459ea8a76e 100644 --- a/tasks/unit_tests/testdata/components_src/comp/classic/component.go +++ b/tasks/unit_tests/testdata/components_src/comp/classic/component.go @@ -6,6 +6,6 @@ // Package classic uses classic folder structure package classic -// team: agent-shared-components +// team: agent-runtimes type Component interface{} diff --git a/tasks/unit_tests/testdata/components_src/comp/group/bundle.go b/tasks/unit_tests/testdata/components_src/comp/group/bundle.go index 8a8979e5e71ce1..5103dc7a298d5b 100644 --- a/tasks/unit_tests/testdata/components_src/comp/group/bundle.go +++ b/tasks/unit_tests/testdata/components_src/comp/group/bundle.go @@ -8,7 +8,7 @@ package group import "github.com/DataDog/datadog-agent/pkg/util/fxutil" -// team: agent-shared-components +// team: agent-runtimes func Bundle() fxutil.BundleOptions { return fxutil.Bundle() diff --git a/tasks/unit_tests/testdata/components_src/comp/group/inbundle/def/component.go b/tasks/unit_tests/testdata/components_src/comp/group/inbundle/def/component.go index 6b6a8ac97040e5..58525c4edcf1c8 100644 --- a/tasks/unit_tests/testdata/components_src/comp/group/inbundle/def/component.go +++ b/tasks/unit_tests/testdata/components_src/comp/group/inbundle/def/component.go @@ -6,6 +6,6 @@ // Package newstyle uses new folder structure package inbundle -// team: agent-shared-components +// team: agent-runtimes type Component interface{} diff --git 
a/tasks/unit_tests/testdata/components_src/comp/multiple/def/component.go b/tasks/unit_tests/testdata/components_src/comp/multiple/def/component.go index 7367d20aaa43ea..8f1182ee8b5530 100644 --- a/tasks/unit_tests/testdata/components_src/comp/multiple/def/component.go +++ b/tasks/unit_tests/testdata/components_src/comp/multiple/def/component.go @@ -6,6 +6,6 @@ // Package multiple uses multiple implementations package multiple -// team: agent-shared-components +// team: agent-runtimes type Component interface{} diff --git a/tasks/unit_tests/testdata/components_src/comp/newstyle/def/component.go b/tasks/unit_tests/testdata/components_src/comp/newstyle/def/component.go index 5c0147786be455..40c651d83dd937 100644 --- a/tasks/unit_tests/testdata/components_src/comp/newstyle/def/component.go +++ b/tasks/unit_tests/testdata/components_src/comp/newstyle/def/component.go @@ -6,6 +6,6 @@ // Package newstyle uses new folder structure package newstyle -// team: agent-shared-components +// team: agent-runtimes type Component interface{} diff --git a/test/new-e2e/examples/agentenv_file_permissions_test.go b/test/new-e2e/examples/agentenv_file_permissions_test.go index 516498473859db..c3de506d488dd4 100644 --- a/test/new-e2e/examples/agentenv_file_permissions_test.go +++ b/test/new-e2e/examples/agentenv_file_permissions_test.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type filePermissionsTestSuite struct { diff --git a/test/new-e2e/examples/agentenv_file_permissions_win_test.go b/test/new-e2e/examples/agentenv_file_permissions_win_test.go index faf7fae0f2e96d..dfd437308e1345 100644 --- 
a/test/new-e2e/examples/agentenv_file_permissions_win_test.go +++ b/test/new-e2e/examples/agentenv_file_permissions_win_test.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type filePermissionsWindowsTestSuite struct { diff --git a/test/new-e2e/tests/agent-shared-components/api/api_test.go b/test/new-e2e/tests/agent-configuration/api/api_test.go similarity index 99% rename from test/new-e2e/tests/agent-shared-components/api/api_test.go rename to test/new-e2e/tests/agent-configuration/api/api_test.go index db7b22ffcdc490..f26b72b55fb1d7 100644 --- a/test/new-e2e/tests/agent-shared-components/api/api_test.go +++ b/test/new-e2e/tests/agent-configuration/api/api_test.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) const ( diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/config_endpoint.go b/test/new-e2e/tests/agent-configuration/config-refresh/config_endpoint.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/config-refresh/config_endpoint.go rename to test/new-e2e/tests/agent-configuration/config-refresh/config_endpoint.go diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/docs.go b/test/new-e2e/tests/agent-configuration/config-refresh/docs.go similarity 
index 100% rename from test/new-e2e/tests/agent-shared-components/config-refresh/docs.go rename to test/new-e2e/tests/agent-configuration/config-refresh/docs.go diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/fixtures/config.yaml.tmpl b/test/new-e2e/tests/agent-configuration/config-refresh/fixtures/config.yaml.tmpl similarity index 100% rename from test/new-e2e/tests/agent-shared-components/config-refresh/fixtures/config.yaml.tmpl rename to test/new-e2e/tests/agent-configuration/config-refresh/fixtures/config.yaml.tmpl diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/fixtures/secret-resolver.py b/test/new-e2e/tests/agent-configuration/config-refresh/fixtures/secret-resolver.py similarity index 100% rename from test/new-e2e/tests/agent-shared-components/config-refresh/fixtures/secret-resolver.py rename to test/new-e2e/tests/agent-configuration/config-refresh/fixtures/secret-resolver.py diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/fixtures/security-agent.yaml b/test/new-e2e/tests/agent-configuration/config-refresh/fixtures/security-agent.yaml similarity index 100% rename from test/new-e2e/tests/agent-shared-components/config-refresh/fixtures/security-agent.yaml rename to test/new-e2e/tests/agent-configuration/config-refresh/fixtures/security-agent.yaml diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_common.go b/test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_common.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_common.go rename to test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_common.go diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_nix_test.go b/test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_nix_test.go similarity index 99% rename from 
test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_nix_test.go rename to test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_nix_test.go index d12913b0ea296e..4c206878613068 100644 --- a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_nix_test.go +++ b/test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_nix_test.go @@ -21,7 +21,7 @@ import ( awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type configRefreshLinuxSuite struct { diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go b/test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_win_test.go similarity index 99% rename from test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go rename to test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_win_test.go index a68e94ec1d6fee..13020dd0d571b0 100644 --- a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go +++ b/test/new-e2e/tests/agent-configuration/config-refresh/non_core_agents_sync_win_test.go @@ -21,7 +21,7 @@ import ( awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets 
"github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type configRefreshWindowsSuite struct { diff --git a/test/new-e2e/tests/agent-shared-components/gui/gui_common.go b/test/new-e2e/tests/agent-configuration/gui/gui_common.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/gui/gui_common.go rename to test/new-e2e/tests/agent-configuration/gui/gui_common.go diff --git a/test/new-e2e/tests/agent-shared-components/gui/gui_nix_test.go b/test/new-e2e/tests/agent-configuration/gui/gui_nix_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/gui/gui_nix_test.go rename to test/new-e2e/tests/agent-configuration/gui/gui_nix_test.go diff --git a/test/new-e2e/tests/agent-shared-components/gui/gui_win_test.go b/test/new-e2e/tests/agent-configuration/gui/gui_win_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/gui/gui_win_test.go rename to test/new-e2e/tests/agent-configuration/gui/gui_win_test.go diff --git a/test/new-e2e/tests/agent-shared-components/inventory/inventory_agent_test.go b/test/new-e2e/tests/agent-configuration/inventory/inventory_agent_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/inventory/inventory_agent_test.go rename to test/new-e2e/tests/agent-configuration/inventory/inventory_agent_test.go diff --git a/test/new-e2e/tests/agent-shared-components/secret/fixtures/secret_script.py b/test/new-e2e/tests/agent-configuration/secret/fixtures/secret_script.py similarity index 100% rename from test/new-e2e/tests/agent-shared-components/secret/fixtures/secret_script.py rename to test/new-e2e/tests/agent-configuration/secret/fixtures/secret_script.py diff --git a/test/new-e2e/tests/agent-shared-components/secret/secret_common_test.go b/test/new-e2e/tests/agent-configuration/secret/secret_common_test.go similarity index 100% rename from 
test/new-e2e/tests/agent-shared-components/secret/secret_common_test.go rename to test/new-e2e/tests/agent-configuration/secret/secret_common_test.go diff --git a/test/new-e2e/tests/agent-shared-components/secret/secret_nix_test.go b/test/new-e2e/tests/agent-configuration/secret/secret_nix_test.go similarity index 97% rename from test/new-e2e/tests/agent-shared-components/secret/secret_nix_test.go rename to test/new-e2e/tests/agent-configuration/secret/secret_nix_test.go index 1cb11d89fffa09..f689982a7403dc 100644 --- a/test/new-e2e/tests/agent-shared-components/secret/secret_nix_test.go +++ b/test/new-e2e/tests/agent-configuration/secret/secret_nix_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type linuxRuntimeSecretSuite struct { diff --git a/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go b/test/new-e2e/tests/agent-configuration/secret/secret_win_test.go similarity index 98% rename from test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go rename to test/new-e2e/tests/agent-configuration/secret/secret_win_test.go index c2b51d767d3066..0ff1e85dd46b82 100644 --- a/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go +++ b/test/new-e2e/tests/agent-configuration/secret/secret_win_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type windowsRuntimeSecretSuite struct { diff 
--git a/test/new-e2e/tests/agent-shared-components/secretsutils/client.go b/test/new-e2e/tests/agent-configuration/secretsutils/client.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/secretsutils/client.go rename to test/new-e2e/tests/agent-configuration/secretsutils/client.go diff --git a/test/new-e2e/tests/agent-shared-components/secretsutils/fixtures/secret-resolver.py b/test/new-e2e/tests/agent-configuration/secretsutils/fixtures/secret-resolver.py similarity index 100% rename from test/new-e2e/tests/agent-shared-components/secretsutils/fixtures/secret-resolver.py rename to test/new-e2e/tests/agent-configuration/secretsutils/fixtures/secret-resolver.py diff --git a/test/new-e2e/tests/agent-shared-components/secretsutils/fixtures/secret_wrapper.bat b/test/new-e2e/tests/agent-configuration/secretsutils/fixtures/secret_wrapper.bat similarity index 100% rename from test/new-e2e/tests/agent-shared-components/secretsutils/fixtures/secret_wrapper.bat rename to test/new-e2e/tests/agent-configuration/secretsutils/fixtures/secret_wrapper.bat diff --git a/test/new-e2e/tests/agent-shared-components/secretsutils/helpers.go b/test/new-e2e/tests/agent-configuration/secretsutils/helpers.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/secretsutils/helpers.go rename to test/new-e2e/tests/agent-configuration/secretsutils/helpers.go diff --git a/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go b/test/new-e2e/tests/agent-runtimes/forwarder/nss_failover_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go rename to test/new-e2e/tests/agent-runtimes/forwarder/nss_failover_test.go diff --git a/test/new-e2e/tests/agent-shared-components/forwarder/testfixtures/config.yaml.tmpl b/test/new-e2e/tests/agent-runtimes/forwarder/testfixtures/config.yaml.tmpl similarity index 100% rename from 
test/new-e2e/tests/agent-shared-components/forwarder/testfixtures/config.yaml.tmpl rename to test/new-e2e/tests/agent-runtimes/forwarder/testfixtures/config.yaml.tmpl diff --git a/test/new-e2e/tests/agent-shared-components/forwarder/testfixtures/custom_logs.yaml.tmpl b/test/new-e2e/tests/agent-runtimes/forwarder/testfixtures/custom_logs.yaml.tmpl similarity index 100% rename from test/new-e2e/tests/agent-shared-components/forwarder/testfixtures/custom_logs.yaml.tmpl rename to test/new-e2e/tests/agent-runtimes/forwarder/testfixtures/custom_logs.yaml.tmpl diff --git a/test/new-e2e/tests/agent-shared-components/hostname/imdsv2_transition_common_test.go b/test/new-e2e/tests/agent-runtimes/hostname/imdsv2_transition_common_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/hostname/imdsv2_transition_common_test.go rename to test/new-e2e/tests/agent-runtimes/hostname/imdsv2_transition_common_test.go diff --git a/test/new-e2e/tests/agent-shared-components/ipc/docs.go b/test/new-e2e/tests/agent-runtimes/ipc/docs.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/ipc/docs.go rename to test/new-e2e/tests/agent-runtimes/ipc/docs.go diff --git a/test/new-e2e/tests/agent-shared-components/ipc/fixtures/config.yaml.tmpl b/test/new-e2e/tests/agent-runtimes/ipc/fixtures/config.yaml.tmpl similarity index 100% rename from test/new-e2e/tests/agent-shared-components/ipc/fixtures/config.yaml.tmpl rename to test/new-e2e/tests/agent-runtimes/ipc/fixtures/config.yaml.tmpl diff --git a/test/new-e2e/tests/agent-shared-components/ipc/fixtures/security-agent.yaml b/test/new-e2e/tests/agent-runtimes/ipc/fixtures/security-agent.yaml similarity index 100% rename from test/new-e2e/tests/agent-shared-components/ipc/fixtures/security-agent.yaml rename to test/new-e2e/tests/agent-runtimes/ipc/fixtures/security-agent.yaml diff --git a/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_common.go 
b/test/new-e2e/tests/agent-runtimes/ipc/ipc_security_common.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/ipc/ipc_security_common.go rename to test/new-e2e/tests/agent-runtimes/ipc/ipc_security_common.go diff --git a/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_nix_test.go b/test/new-e2e/tests/agent-runtimes/ipc/ipc_security_nix_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/ipc/ipc_security_nix_test.go rename to test/new-e2e/tests/agent-runtimes/ipc/ipc_security_nix_test.go diff --git a/test/new-e2e/tests/agent-shared-components/ipc/ipc_security_win_test.go b/test/new-e2e/tests/agent-runtimes/ipc/ipc_security_win_test.go similarity index 100% rename from test/new-e2e/tests/agent-shared-components/ipc/ipc_security_win_test.go rename to test/new-e2e/tests/agent-runtimes/ipc/ipc_security_win_test.go diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go index 7050698056943c..b73bf2ab72ed24 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go index bbeeef59ec1dd6..f586b57a46ff8e 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go +++ 
b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" - secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type windowsSecretSuite struct { diff --git a/test/new-e2e/tests/apm/vm_test.go b/test/new-e2e/tests/apm/vm_test.go index 006cd6353bd792..2540c4f5d5d3df 100644 --- a/test/new-e2e/tests/apm/vm_test.go +++ b/test/new-e2e/tests/apm/vm_test.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-configuration/secretsutils" ) type VMFakeintakeSuite struct { From c68ee8d8d517d8e70559904ea19c7d91a0a68ca9 Mon Sep 17 00:00:00 2001 From: Guillaume Fournier <36961134+Gui774ume@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:29:17 +0100 Subject: [PATCH 89/97] [CWS] Fix inet_bind / inet6_bind probes (#33541) --- .../ebpf/c/include/hooks/network/flow.h | 32 +- pkg/security/ebpf/probes/event_types.go | 3 + pkg/security/ebpf/probes/flow.go | 18 + pkg/security/tests/flow_pid_test.go | 818 ++++++++++++++++++ 4 files changed, 865 insertions(+), 6 deletions(-) create mode 100644 pkg/security/tests/flow_pid_test.go diff --git a/pkg/security/ebpf/c/include/hooks/network/flow.h b/pkg/security/ebpf/c/include/hooks/network/flow.h index dc94ff71da27cb..5ba997ea34262b 100644 --- a/pkg/security/ebpf/c/include/hooks/network/flow.h +++ 
b/pkg/security/ebpf/c/include/hooks/network/flow.h @@ -320,9 +320,7 @@ int hook_inet_release(ctx_t *ctx) { return handle_sk_release(sk); } -HOOK_ENTRY("inet_bind") -int hook_inet_bind(ctx_t *ctx) { - struct socket *sock = (struct socket *)CTX_PARM1(ctx); +__attribute__((always_inline)) int handle_inet_bind(struct socket *sock) { struct inet_bind_args_t args = {}; args.sock = sock; u64 pid = bpf_get_current_pid_tgid(); @@ -330,8 +328,19 @@ int hook_inet_bind(ctx_t *ctx) { return 0; } -HOOK_EXIT("inet_bind") -int rethook_inet_bind(ctx_t *ctx) { +HOOK_ENTRY("inet_bind") +int hook_inet_bind(ctx_t *ctx) { + struct socket *sock = (struct socket *)CTX_PARM1(ctx); + return handle_inet_bind(sock); +} + +HOOK_ENTRY("inet6_bind") +int hook_inet6_bind(ctx_t *ctx) { + struct socket *sock = (struct socket *)CTX_PARM1(ctx); + return handle_inet_bind(sock); +} + +__attribute__((always_inline)) int handle_inet_bind_ret(int ret) { // fetch inet_bind arguments u64 id = bpf_get_current_pid_tgid(); u32 tid = (u32)id; @@ -344,7 +353,6 @@ int rethook_inet_bind(ctx_t *ctx) { // delete the entry in inet_bind_args to make sure we always cleanup inet_bind_args and we don't leak entries bpf_map_delete_elem(&inet_bind_args, &id); - int ret = CTX_PARMRET(ctx); if (ret < 0) { // we only care about successful bind operations return 0; @@ -394,4 +402,16 @@ int rethook_inet_bind(ctx_t *ctx) { return 0; } +HOOK_EXIT("inet_bind") +int rethook_inet_bind(ctx_t *ctx) { + int ret = CTX_PARMRET(ctx); + return handle_inet_bind_ret(ret); +} + +HOOK_EXIT("inet6_bind") +int rethook_inet6_bind(ctx_t *ctx) { + int ret = CTX_PARMRET(ctx); + return handle_inet_bind_ret(ret); +} + #endif diff --git a/pkg/security/ebpf/probes/event_types.go b/pkg/security/ebpf/probes/event_types.go index 543a605211a66c..f20f3c5717c445 100644 --- a/pkg/security/ebpf/probes/event_types.go +++ b/pkg/security/ebpf/probes/event_types.go @@ -48,6 +48,9 @@ func NetworkSelectors() []manager.ProbesSelector { kprobeOrFentry("inet_release"), 
kprobeOrFentry("inet_shutdown"), kprobeOrFentry("inet_bind"), + kretprobeOrFexit("inet_bind"), + kprobeOrFentry("inet6_bind"), + kretprobeOrFexit("inet6_bind"), kprobeOrFentry("sk_common_release"), kprobeOrFentry("path_get"), kprobeOrFentry("proc_fd_link"), diff --git a/pkg/security/ebpf/probes/flow.go b/pkg/security/ebpf/probes/flow.go index 8700f5093a6995..54cc2b1af79448 100644 --- a/pkg/security/ebpf/probes/flow.go +++ b/pkg/security/ebpf/probes/flow.go @@ -42,6 +42,24 @@ func getFlowProbes() []*manager.Probe { EBPFFuncName: "hook_inet_bind", }, }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "rethook_inet_bind", + }, + }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_inet6_bind", + }, + }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "rethook_inet6_bind", + }, + }, { ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, diff --git a/pkg/security/tests/flow_pid_test.go b/pkg/security/tests/flow_pid_test.go new file mode 100644 index 00000000000000..e5557e94f0b537 --- /dev/null +++ b/pkg/security/tests/flow_pid_test.go @@ -0,0 +1,818 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux && functionaltests + +// Package tests holds tests related files +package tests + +import ( + "encoding/binary" + "fmt" + "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/stretchr/testify/assert" + "golang.org/x/net/nettest" + "os" + "regexp" + "strconv" + "syscall" + "testing" + + "github.com/DataDog/datadog-agent/pkg/security/probe" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +var networkNamespacePattern = regexp.MustCompile(`net:\[(\d+)\]`) + +func htons(port uint16) uint16 { + return (port<<8)&0xFF00 | (port>>8)&0x00FF +} + +func getCurrentNetns() (uint32, error) { + // open netns + f, err := os.Open("/proc/self/ns/net") + if err != nil { + return 0, err + } + defer f.Close() + + l, err := os.Readlink(f.Name()) + if err != nil { + return 0, err + } + + matches := networkNamespacePattern.FindSubmatch([]byte(l)) + if len(matches) <= 1 { + return 0, fmt.Errorf("couldn't parse network namespace ID: %s", l) + } + + netns, err := strconv.ParseUint(string(matches[1]), 10, 32) + if err != nil { + return 0, err + } + return uint32(netns), nil +} + +type FlowPid struct { + Addr0 uint64 + Addr1 uint64 + Netns uint32 + Port uint16 + Padding uint16 +} + +type FlowPidEntry struct { + Pid uint32 + EntryType uint32 +} + +func createSocketAndBind(t *testing.T, sockDomain int, sockType int, sockAddr syscall.Sockaddr, bound chan int, next chan struct{}, closed chan struct{}, errorExpected bool) { + fd, err := syscall.Socket(sockDomain, sockType, 0) + if err != nil { + close(bound) + close(closed) + t.Errorf("Socket error: %v", err) + return + } + defer func() { + _ = syscall.Close(fd) + close(closed) + }() + + if err := syscall.Bind(fd, sockAddr); err != nil { + if !errorExpected { + close(bound) + t.Errorf("Bind error: %v", err) + return + } + } + + // retrieve bound port + boundPort := 0 + if !errorExpected { + sa, err := syscall.Getsockname(fd) + if err != nil { + 
close(bound) + t.Errorf("Getsockname error: %v", err) + return + } + switch addr := sa.(type) { + case *syscall.SockaddrInet6: + boundPort = addr.Port + case *syscall.SockaddrInet4: + boundPort = addr.Port + default: + close(bound) + t.Error("Getsockname error: unknown Sockaddr type") + return + } + } + + bound <- boundPort + <-next +} + +func checkFlowPidEntry(t *testing.T, testModule *testModule, key FlowPid, expectedEntry FlowPidEntry, bound chan int, next chan struct{}, closed chan struct{}, errorExpected bool) { + boundPort := <-bound + if key.Port == 0 && !errorExpected { + key.Port = htons(uint16(boundPort)) + } + + // check that an entry exists for the newly bound server + p, ok := testModule.probe.PlatformProbe.(*probe.EBPFProbe) + if !ok { + close(next) + t.Skip("skipping non eBPF probe") + } + + m, _, err := p.Manager.GetMap("flow_pid") + if err != nil { + close(next) + t.Errorf("failed to get map flow_pid: %v", err) + return + } + + value := FlowPidEntry{} + if !errorExpected { + if err := m.Lookup(&key, &value); err != nil { + t.Log("Dumping flow_pid map ...") + it := m.Iterate() + a := FlowPid{} + b := FlowPidEntry{} + for it.Next(&a, &b) { + t.Logf(" - key %+v value %+v", a, b) + } + t.Logf("The test was looking for key %+v", key) + + close(next) + t.Errorf("Failed to lookup flow_pid: %v", err) + return + } + + assert.Equal(t, expectedEntry.Pid, value.Pid, "wrong pid") + assert.Equal(t, expectedEntry.EntryType, value.EntryType, "wrong entry type") + } + + close(next) + + // wait until the socket is closed and make sure the entry is no longer present + <-closed + if err := m.Lookup(&key, &value); err == nil { + t.Errorf("flow_pid entry wasn't deleted: %+v", value) + } + + // make sure that no other entry in the map contains the EntryPid port + it := m.Iterate() + a := FlowPid{} + b := FlowPidEntry{} + for it.Next(&a, &b) { + if a.Port == key.Port { + t.Errorf("flow_pid entry with matching port found %+v -> %+v", a, b) + return + } + } + +} + +func 
TestFlowPidBind(t *testing.T) { + SkipIfNotAvailable(t) + if testEnvironment == DockerEnvironment || env.IsContainerized() { + t.Skip("Skip tests inside docker") + } + + checkNetworkCompatibility(t) + + if out, err := loadModule("veth"); err != nil { + t.Fatalf("couldn't load 'veth' module: %s,%v", string(out), err) + } + + ruleDefs := []*rules.RuleDefinition{ + // We use this dummy DNS rule to make sure the flow <-> pid tracking probes are loaded + { + ID: "test_dns", + Expression: `dns.question.name == "testsuite"`, + }, + } + + pid := uint32(os.Getpid()) + netns, err := getCurrentNetns() + if err != nil { + t.Fatalf("failed to get the network namespace: %v", err) + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + t.Run("test_sock_ipv4_udp_bind_0.0.0.0:1234", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet4{Port: 1234, Addr: [4]byte{0, 0, 0, 0}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Netns: netns, + Port: htons(1234), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv4_udp_bind_127.0.0.1:1235", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet4{Port: 1235, Addr: [4]byte{127, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{0, 0, 0, 0, 1, 0, 0, 127}), + Netns: netns, + Port: htons(1235), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv4_udp_bind_127.0.0.1:0", 
func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet4{Port: 0, Addr: [4]byte{127, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{0, 0, 0, 0, 1, 0, 0, 127}), + Netns: netns, + Port: 0, // will be set later + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv6_udp_bind_[::]:1236", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet6{Port: 1236, Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Netns: netns, + Port: htons(1236), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv6_udp_bind_[::1]:1237", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet6{Port: 1237, Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr1: binary.BigEndian.Uint64([]byte{1, 0, 0, 0, 0, 0, 0, 0}), + Netns: netns, + Port: htons(1237), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv6_udp_bind_[::1]:0", func(t 
*testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet6{Port: 0, Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr1: binary.BigEndian.Uint64([]byte{1, 0, 0, 0, 0, 0, 0, 0}), + Netns: netns, + Port: 0, // will be set later + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv4_tcp_bind_0.0.0.0:1234", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_STREAM, + &syscall.SockaddrInet4{Port: 1234, Addr: [4]byte{0, 0, 0, 0}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Netns: netns, + Port: htons(1234), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv4_tcp_bind_127.0.0.1:1235", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_STREAM, + &syscall.SockaddrInet4{Port: 1235, Addr: [4]byte{127, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{0, 0, 0, 0, 1, 0, 0, 127}), + Netns: netns, + Port: htons(1235), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv4_tcp_bind_127.0.0.1:0", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go 
createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_STREAM, + &syscall.SockaddrInet4{Port: 0, Addr: [4]byte{127, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{0, 0, 0, 0, 1, 0, 0, 127}), + Netns: netns, + Port: 0, // will be set later + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv6_tcp_bind_[::]:1236", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_STREAM, + &syscall.SockaddrInet6{Port: 1236, Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Netns: netns, + Port: htons(1236), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv6_tcp_bind_[::1]:1237", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_STREAM, + &syscall.SockaddrInet6{Port: 1237, Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr1: binary.BigEndian.Uint64([]byte{1, 0, 0, 0, 0, 0, 0, 0}), + Netns: netns, + Port: htons(1237), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) + + t.Run("test_sock_ipv6_tcp_bind_[::1]:0", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := 
make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_STREAM, + &syscall.SockaddrInet6{Port: 0, Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + bound, + next, + closed, + false, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr1: binary.BigEndian.Uint64([]byte{1, 0, 0, 0, 0, 0, 0, 0}), + Netns: netns, + Port: 0, // will be set later + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + false, + ) + }) +} + +func TestFlowPidBindLeak(t *testing.T) { + SkipIfNotAvailable(t) + if testEnvironment == DockerEnvironment || env.IsContainerized() { + t.Skip("Skip tests inside docker") + } + + checkNetworkCompatibility(t) + + if out, err := loadModule("veth"); err != nil { + t.Fatalf("couldn't load 'veth' module: %s,%v", string(out), err) + } + + ruleDefs := []*rules.RuleDefinition{ + // We use this dummy DNS rule to make sure the flow <-> pid tracking probes are loaded + { + ID: "test_dns", + Expression: `dns.question.name == "testsuite"`, + }, + } + + pid := uint32(os.Getpid()) + netns, err := utils.NetNSPathFromPid(pid).GetProcessNetworkNamespace() + if err != nil { + t.Fatalf("failed to get the network namespace: %v", err) + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + t.Run("test_sock_ipv4_udp_bind_99.99.99.99:2234", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet4{Port: 2234, Addr: [4]byte{99, 99, 99, 99}}, + bound, + next, + closed, + true, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{0, 0, 0, 0, 99, 99, 99, 99}), + Netns: netns, + Port: htons(2234), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + 
true, + ) + }) + + t.Run("test_sock_ipv4_tcp_bind_99.99.99.99:2235", func(t *testing.T) { + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET, + syscall.SOCK_STREAM, + &syscall.SockaddrInet4{Port: 2235, Addr: [4]byte{99, 99, 99, 99}}, + bound, + next, + closed, + true, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{0, 0, 0, 0, 99, 99, 99, 99}), + Netns: netns, + Port: htons(2235), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + true, + ) + }) + + t.Run("test_sock_ipv6_udp_bind_[99*]:2236", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet6{Port: 2236, Addr: [16]byte{99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}}, + bound, + next, + closed, + true, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{99, 99, 99, 99, 99, 99, 99, 99}), + Addr1: binary.BigEndian.Uint64([]byte{99, 99, 99, 99, 99, 99, 99, 99}), + Netns: netns, + Port: htons(2236), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + true, + ) + }) + + t.Run("test_sock_ipv6_tcp_bind_[99*]:2237", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + bound := make(chan int) + next := make(chan struct{}) + closed := make(chan struct{}) + + go createSocketAndBind( + t, + syscall.AF_INET6, + syscall.SOCK_DGRAM, + &syscall.SockaddrInet6{Port: 2237, Addr: [16]byte{99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}}, + bound, + next, + closed, + true, + ) + checkFlowPidEntry( + t, + test, + FlowPid{ + Addr0: binary.BigEndian.Uint64([]byte{99, 
99, 99, 99, 99, 99, 99, 99}), + Addr1: binary.BigEndian.Uint64([]byte{99, 99, 99, 99, 99, 99, 99, 99}), + Netns: netns, + Port: htons(2237), + }, + FlowPidEntry{ + Pid: pid, + EntryType: uint32(0), /* BIND_ENTRY */ + }, + bound, + next, + closed, + true, + ) + }) +} From cd041a215b45cc3fe9f5bf8064d76b5f56292edb Mon Sep 17 00:00:00 2001 From: David Ortiz Date: Fri, 31 Jan 2025 13:23:56 +0100 Subject: [PATCH 90/97] [languagedetection] Refactor languagemodels package (#33356) --- .../api/v1/languagedetection/util.go | 19 +- .../api/v1/languagedetection/util_test.go | 197 +++++++++--------- .../kubeapiserver/deployments_test.go | 18 +- .../kubernetes_resource_parsers/deployment.go | 14 +- .../deployment_test.go | 40 ++-- comp/core/workloadmeta/def/types.go | 7 +- .../client/clientimpl/client.go | 6 +- .../client/clientimpl/client_test.go | 60 +++--- .../client/clientimpl/util.go | 12 +- .../auto_instrumentation_test.go | 8 +- .../auto_instrumentation_util_test.go | 14 +- .../admission/mutate/common/test_utils.go | 10 +- pkg/clusteragent/languagedetection/patcher.go | 3 +- .../languagedetection/patcher_test.go | 47 +++-- .../{util => languagemodels}/README.md | 0 .../{util => languagemodels}/annotations.go | 3 +- .../annotations_test.go | 2 +- .../containerlanguages.go | 5 +- .../containerlanguages_test.go | 5 +- .../{util => languagemodels}/languageset.go | 20 +- .../languageset_test.go | 11 +- pkg/languagedetection/util/doc.go | 2 +- 22 files changed, 250 insertions(+), 253 deletions(-) rename pkg/languagedetection/{util => languagemodels}/README.md (100%) rename pkg/languagedetection/{util => languagemodels}/annotations.go (91%) rename pkg/languagedetection/{util => languagemodels}/annotations_test.go (98%) rename pkg/languagedetection/{util => languagemodels}/containerlanguages.go (99%) rename pkg/languagedetection/{util => languagemodels}/containerlanguages_test.go (99%) rename pkg/languagedetection/{util => languagemodels}/languageset.go (86%) rename 
pkg/languagedetection/{util => languagemodels}/languageset_test.go (97%) diff --git a/cmd/cluster-agent/api/v1/languagedetection/util.go b/cmd/cluster-agent/api/v1/languagedetection/util.go index f4a94951c7c569..873a151a6e46ce 100644 --- a/cmd/cluster-agent/api/v1/languagedetection/util.go +++ b/cmd/cluster-agent/api/v1/languagedetection/util.go @@ -15,6 +15,7 @@ import ( "time" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) @@ -23,13 +24,13 @@ import ( // The dirty flag is used to know if the containers languages are flushed to workload metadata store or not. // The dirty flag is reset when languages are flushed to workload metadata store. type containersLanguageWithDirtyFlag struct { - languages langUtil.TimedContainersLanguages + languages languagemodels.TimedContainersLanguages dirty bool } func newContainersLanguageWithDirtyFlag() *containersLanguageWithDirtyFlag { return &containersLanguageWithDirtyFlag{ - languages: make(langUtil.TimedContainersLanguages), + languages: make(languagemodels.TimedContainersLanguages), dirty: true, } } @@ -214,17 +215,17 @@ func (ownersLanguages *OwnersLanguages) cleanRemovedOwners(wlm workloadmeta.Comp // generatePushEvent generates a workloadmeta push event based on the owner languages // if owner has no detected languages, it generates an unset event // else it generates a set event -func generatePushEvent(owner langUtil.NamespacedOwnerReference, languages langUtil.TimedContainersLanguages) *workloadmeta.Event { +func generatePushEvent(owner langUtil.NamespacedOwnerReference, languages languagemodels.TimedContainersLanguages) *workloadmeta.Event { _, found := langUtil.SupportedBaseOwners[owner.Kind] if !found { return nil } - containerLanguages := make(langUtil.ContainersLanguages) + 
containerLanguages := make(languagemodels.ContainersLanguages) for container, langsetWithExpiration := range languages { - containerLanguages[container] = make(langUtil.LanguageSet) + containerLanguages[container] = make(languagemodels.LanguageSet) for lang := range langsetWithExpiration { containerLanguages[container][lang] = struct{}{} } @@ -254,15 +255,15 @@ func generatePushEvent(owner langUtil.NamespacedOwnerReference, languages langUt // getContainersLanguagesFromPodDetail returns containers languages objects for both standard containers // and for init container -func getContainersLanguagesFromPodDetail(podDetail *pbgo.PodLanguageDetails, expirationTime time.Time) *langUtil.TimedContainersLanguages { - containersLanguages := make(langUtil.TimedContainersLanguages) +func getContainersLanguagesFromPodDetail(podDetail *pbgo.PodLanguageDetails, expirationTime time.Time) *languagemodels.TimedContainersLanguages { + containersLanguages := make(languagemodels.TimedContainersLanguages) // handle standard containers for _, containerLanguageDetails := range podDetail.ContainerDetails { containerName := containerLanguageDetails.ContainerName languages := containerLanguageDetails.Languages for _, language := range languages { - containersLanguages.GetOrInitialize(*langUtil.NewContainer(containerName)).Add(langUtil.Language(language.Name), expirationTime) + containersLanguages.GetOrInitialize(*languagemodels.NewContainer(containerName)).Add(languagemodels.LanguageName(language.Name), expirationTime) } } @@ -271,7 +272,7 @@ func getContainersLanguagesFromPodDetail(podDetail *pbgo.PodLanguageDetails, exp containerName := containerLanguageDetails.ContainerName languages := containerLanguageDetails.Languages for _, language := range languages { - containersLanguages.GetOrInitialize(*langUtil.NewInitContainer(containerName)).Add(langUtil.Language(language.Name), expirationTime) + 
containersLanguages.GetOrInitialize(*languagemodels.NewInitContainer(containerName)).Add(languagemodels.LanguageName(language.Name), expirationTime) } } diff --git a/cmd/cluster-agent/api/v1/languagedetection/util_test.go b/cmd/cluster-agent/api/v1/languagedetection/util_test.go index 65685b3578494a..b34743dffc2282 100644 --- a/cmd/cluster-agent/api/v1/languagedetection/util_test.go +++ b/cmd/cluster-agent/api/v1/languagedetection/util_test.go @@ -20,6 +20,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -60,8 +61,8 @@ func TestOwnersLanguagesGetOrInitialise(t *testing.T) { ownersLanguages: &OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": {}, }, }, @@ -72,8 +73,8 @@ func TestOwnersLanguagesGetOrInitialise(t *testing.T) { ownerRef: mockNamespacedOwnerRef, expected: &containersLanguageWithDirtyFlag{ - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": {}, }, }, @@ -115,8 +116,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { other: &OwnersLanguages{ 
containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": mockExpiration, }, }, @@ -127,8 +128,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { expectedAfterMerge: &OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": mockExpiration, }, }, @@ -142,8 +143,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { ownersLanguages: &OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": {}, }, }, @@ -155,8 +156,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { expectedAfterMerge: &OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": {}, }, }, @@ -170,8 +171,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { ownersLanguages: 
&OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": {}, "ruby": {}, }, @@ -179,8 +180,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { dirty: false, }, cleanMockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-other-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-other-container"): { "java": {}, "ruby": {}, }, @@ -192,26 +193,26 @@ func TestOwnersLanguagesMerge(t *testing.T) { other: &OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "perl": {}, }, - *langUtil.NewContainer("some-other-container"): langUtil.TimedLanguageSet{ + *languagemodels.NewContainer("some-other-container"): languagemodels.TimedLanguageSet{ "cpp": {}, }, }, }, otherMockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-other-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-other-container"): { "java": {}, "cpp": {}, }, }, }, cleanMockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-other-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-other-container"): { "java": mockExpiration, "ruby": 
mockExpiration, }, @@ -222,21 +223,21 @@ func TestOwnersLanguagesMerge(t *testing.T) { expectedAfterMerge: &OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): langUtil.TimedLanguageSet{ + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): languagemodels.TimedLanguageSet{ "java": {}, "ruby": {}, "perl": {}, }, - *langUtil.NewContainer("some-other-container"): langUtil.TimedLanguageSet{ + *languagemodels.NewContainer("some-other-container"): languagemodels.TimedLanguageSet{ "cpp": {}, }, }, dirty: true, }, cleanMockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-other-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-other-container"): { "java": mockExpiration, "ruby": mockExpiration, }, @@ -244,8 +245,8 @@ func TestOwnersLanguagesMerge(t *testing.T) { dirty: false, }, otherMockNamespacedOwnerRef: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-other-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-other-container"): { "java": {}, "cpp": {}, }, @@ -274,8 +275,8 @@ func TestOwnersLanguagesFlush(t *testing.T) { ownersLanguages := OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockSupportedOwnerA: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "java": mockExpiration, "ruby": mockExpiration, "perl": mockExpiration, @@ -285,11 +286,11 @@ func TestOwnersLanguagesFlush(t *testing.T) { }, mockSupportedOwnerB: { - languages: 
langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "java": mockExpiration, }, - *langUtil.NewContainer("some-other-container"): { + *languagemodels.NewContainer("some-other-container"): { "cpp": mockExpiration, }, }, @@ -315,8 +316,8 @@ func TestOwnersLanguagesFlush(t *testing.T) { return false } - return reflect.DeepEqual(deploymentA.DetectedLanguages, langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): { + return reflect.DeepEqual(deploymentA.DetectedLanguages, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "perl": {}, "java": {}, "ruby": {}, @@ -343,13 +344,13 @@ func TestOwnersLanguagesFlush(t *testing.T) { // add unsupported owner to ownerslanguages ownersLanguages.containersLanguages[mockUnsupportedOwner] = &containersLanguageWithDirtyFlag{ - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "perl": mockExpiration, "java": mockExpiration, "ruby": mockExpiration, }, - *langUtil.NewContainer("some-other-container"): { + *languagemodels.NewContainer("some-other-container"): { "cpp": mockExpiration, }, }, @@ -369,9 +370,9 @@ func TestOwnersLanguagesFlush(t *testing.T) { languagesInStore := deploymentB.DetectedLanguages - return reflect.DeepEqual(languagesInStore, langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): {"java": {}}, - *langUtil.NewContainer("some-other-container"): {"cpp": {}}, + return reflect.DeepEqual(languagesInStore, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): {"java": {}}, + *languagemodels.NewContainer("some-other-container"): {"cpp": {}}, }) }, eventuallyTestTimeout, eventuallyTestTick, "Should find deploymentB in workloadmeta store with 
the correct languages") @@ -388,8 +389,8 @@ func TestOwnersLanguagesMergeAndFlush(t *testing.T) { ownersLanguages := OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockSupportedOwnerA: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("python-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("python-container"): { "python": mockExpiration.Add(10 * time.Minute), }, }, @@ -415,8 +416,8 @@ func TestOwnersLanguagesMergeAndFlush(t *testing.T) { return false } - return reflect.DeepEqual(deploymentA.DetectedLanguages, langUtil.ContainersLanguages{ - *langUtil.NewContainer("python-container"): { + return reflect.DeepEqual(deploymentA.DetectedLanguages, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("python-container"): { "python": {}, }, }) @@ -429,11 +430,11 @@ func TestOwnersLanguagesMergeAndFlush(t *testing.T) { mockOwnersLanguagesFromRequest := OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockSupportedOwnerA: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("python-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("python-container"): { "python": mockExpiration.Add(30 * time.Minute), }, - *langUtil.NewContainer("ruby-container"): { + *languagemodels.NewContainer("ruby-container"): { "ruby": mockExpiration.Add(50 * time.Minute), }, }, @@ -457,11 +458,11 @@ func TestOwnersLanguagesMergeAndFlush(t *testing.T) { return false } - return reflect.DeepEqual(deploymentA.DetectedLanguages, langUtil.ContainersLanguages{ - *langUtil.NewContainer("python-container"): { + return reflect.DeepEqual(deploymentA.DetectedLanguages, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("python-container"): { "python": {}, }, - *langUtil.NewContainer("ruby-container"): { + 
*languagemodels.NewContainer("ruby-container"): { "ruby": {}, }, }) @@ -491,8 +492,8 @@ func TestCleanExpiredLanguages(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "ns/deploymentA", }, - DetectedLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): { + DetectedLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": {}, "java": {}, }, @@ -506,8 +507,8 @@ func TestCleanExpiredLanguages(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "ns/deploymentB", }, - DetectedLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): { + DetectedLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": {}, "java": {}, }, @@ -519,8 +520,8 @@ func TestCleanExpiredLanguages(t *testing.T) { ownersLanguages := OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockSupportedOwnerA: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": expiredTime, "java": unexpiredTime, }, @@ -528,8 +529,8 @@ func TestCleanExpiredLanguages(t *testing.T) { dirty: false, }, mockSupportedOwnerB: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": expiredTime, "java": expiredTime, }, @@ -548,8 +549,8 @@ func TestCleanExpiredLanguages(t *testing.T) { return false } - return reflect.DeepEqual(deploymentA.DetectedLanguages, langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): { + return reflect.DeepEqual(deploymentA.DetectedLanguages, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): { 
"java": {}, }, }) @@ -581,8 +582,8 @@ func TestHandleKubeAPIServerUnsetEvents(t *testing.T) { ownersLanguages := OwnersLanguages{ containersLanguages: map[langUtil.NamespacedOwnerReference]*containersLanguageWithDirtyFlag{ mockSupportedOwnerA: { - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("some-container"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": unexpiredTime, "java": unexpiredTime, }, @@ -616,8 +617,8 @@ func TestHandleKubeAPIServerUnsetEvents(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "ns/deploymentA", }, - DetectedLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): { + DetectedLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": {}, "java": {}, }, @@ -634,8 +635,8 @@ func TestHandleKubeAPIServerUnsetEvents(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "ns/deploymentA", }, - InjectableLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-container"): { + InjectableLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-container"): { "python": {}, "java": {}, }, @@ -720,20 +721,20 @@ func TestGetContainersLanguagesFromPodDetail(t *testing.T) { containerslanguages := getContainersLanguagesFromPodDetail(podLanguageDetails, mockExpiration) - expectedContainersLanguages := langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("mono-lang"): { + expectedContainersLanguages := languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("mono-lang"): { "java": mockExpiration, }, - *langUtil.NewContainer("bi-lang"): { + *languagemodels.NewContainer("bi-lang"): { "java": mockExpiration, "cpp": mockExpiration, }, - *langUtil.NewContainer("tri-lang"): { + *languagemodels.NewContainer("tri-lang"): { "java": mockExpiration, "go": mockExpiration, "python": mockExpiration, }, - 
*langUtil.NewInitContainer("init-mono-lang"): { + *languagemodels.NewInitContainer("init-mono-lang"): { "java": mockExpiration, }, } @@ -841,21 +842,21 @@ func TestGetOwnersLanguages(t *testing.T) { expectedContainersLanguagesA := containersLanguageWithDirtyFlag{ dirty: true, - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("container-1"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("container-1"): { "java": mockExpiration, "cpp": mockExpiration, "go": mockExpiration, }, - *langUtil.NewContainer("container-2"): { + *languagemodels.NewContainer("container-2"): { "java": mockExpiration, "python": mockExpiration, }, - *langUtil.NewInitContainer("init-container-3"): { + *languagemodels.NewInitContainer("init-container-3"): { "java": mockExpiration, "cpp": mockExpiration, }, - *langUtil.NewInitContainer("init-container-4"): { + *languagemodels.NewInitContainer("init-container-4"): { "java": mockExpiration, "python": mockExpiration, }, @@ -864,21 +865,21 @@ func TestGetOwnersLanguages(t *testing.T) { expectedContainersLanguagesB := containersLanguageWithDirtyFlag{ dirty: true, - languages: langUtil.TimedContainersLanguages{ - *langUtil.NewContainer("container-5"): { + languages: languagemodels.TimedContainersLanguages{ + *languagemodels.NewContainer("container-5"): { "python": mockExpiration, "cpp": mockExpiration, "go": mockExpiration, }, - *langUtil.NewContainer("container-6"): { + *languagemodels.NewContainer("container-6"): { "java": mockExpiration, "ruby": mockExpiration, }, - *langUtil.NewInitContainer("init-container-7"): { + *languagemodels.NewInitContainer("init-container-7"): { "java": mockExpiration, "cpp": mockExpiration, }, - *langUtil.NewInitContainer("init-container-8"): { + *languagemodels.NewInitContainer("init-container-8"): { "java": mockExpiration, "python": mockExpiration, }, @@ -904,19 +905,19 @@ func TestGeneratePushEvent(t *testing.T) { tests := []struct { name string - languages 
langUtil.TimedContainersLanguages + languages languagemodels.TimedContainersLanguages owner langUtil.NamespacedOwnerReference expectedEvent *workloadmeta.Event }{ { name: "unsupported owner", - languages: make(langUtil.TimedContainersLanguages), + languages: make(languagemodels.TimedContainersLanguages), owner: mockUnsupportedOwner, expectedEvent: nil, }, { name: "empty containers languages object with supported owner", - languages: make(langUtil.TimedContainersLanguages), + languages: make(languagemodels.TimedContainersLanguages), owner: mockSupportedOwner, expectedEvent: &workloadmeta.Event{ Type: workloadmeta.EventTypeUnset, @@ -925,26 +926,26 @@ func TestGeneratePushEvent(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "some-ns/some-name", }, - DetectedLanguages: make(langUtil.ContainersLanguages), + DetectedLanguages: make(languagemodels.ContainersLanguages), }, }, }, { name: "non-empty containers languages with supported owner", - languages: langUtil.TimedContainersLanguages{ - langUtil.Container{Name: "container-1", Init: false}: { + languages: languagemodels.TimedContainersLanguages{ + languagemodels.Container{Name: "container-1", Init: false}: { "java": mockExpiration, "cpp": mockExpiration, }, - langUtil.Container{Name: "container-2", Init: false}: { + languagemodels.Container{Name: "container-2", Init: false}: { "java": mockExpiration, "cpp": mockExpiration, }, - langUtil.Container{Name: "container-3", Init: true}: { + languagemodels.Container{Name: "container-3", Init: true}: { "python": mockExpiration, "ruby": mockExpiration, }, - langUtil.Container{Name: "container-4", Init: true}: { + languagemodels.Container{Name: "container-4", Init: true}: { "go": mockExpiration, "java": mockExpiration, }, @@ -957,20 +958,20 @@ func TestGeneratePushEvent(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "some-ns/some-name", }, - DetectedLanguages: langUtil.ContainersLanguages{ - langUtil.Container{Name: "container-1", Init: false}: 
{ + DetectedLanguages: languagemodels.ContainersLanguages{ + languagemodels.Container{Name: "container-1", Init: false}: { "java": {}, "cpp": {}, }, - langUtil.Container{Name: "container-2", Init: false}: { + languagemodels.Container{Name: "container-2", Init: false}: { "java": {}, "cpp": {}, }, - langUtil.Container{Name: "container-3", Init: true}: { + languagemodels.Container{Name: "container-3", Init: true}: { "python": {}, "ruby": {}, }, - langUtil.Container{Name: "container-4", Init: true}: { + languagemodels.Container{Name: "container-4", Init: true}: { "go": {}, "java": {}, }, diff --git a/comp/core/workloadmeta/collectors/internal/kubeapiserver/deployments_test.go b/comp/core/workloadmeta/collectors/internal/kubeapiserver/deployments_test.go index bf1a4af2756a49..cc0929f87d9546 100644 --- a/comp/core/workloadmeta/collectors/internal/kubeapiserver/deployments_test.go +++ b/comp/core/workloadmeta/collectors/internal/kubeapiserver/deployments_test.go @@ -11,8 +11,6 @@ import ( "context" "testing" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" - appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -62,7 +60,7 @@ func Test_DeploymentsFakeKubernetesClient(t *testing.T) { Labels: map[string]string{"test-label": "test-value", "tags.datadoghq.com/env": "env"}, }, Env: "env", - InjectableLanguages: make(langUtil.ContainersLanguages), + InjectableLanguages: make(languagemodels.ContainersLanguages), }, }, }, @@ -107,14 +105,14 @@ func Test_DeploymentsFakeKubernetesClient(t *testing.T) { "internal.dd.datadoghq.com/init.redis.detected_langs": "go,python", }, }, - InjectableLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("nginx"): { - langUtil.Language(languagemodels.Go): {}, - langUtil.Language(languagemodels.Java): {}, + InjectableLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("nginx"): { + languagemodels.Go: {}, + languagemodels.Java: 
{}, }, - *langUtil.NewInitContainer("redis"): { - langUtil.Language(languagemodels.Go): {}, - langUtil.Language(languagemodels.Python): {}, + *languagemodels.NewInitContainer("redis"): { + languagemodels.Go: {}, + languagemodels.Python: {}, }, }, }, diff --git a/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment.go b/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment.go index 00cb8b2fdb1151..2cb9eb5e67b589 100644 --- a/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment.go +++ b/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - languagedetectionUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" ddkube "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -40,28 +40,28 @@ func NewDeploymentParser(annotationsExclude []string) (ObjectParser, error) { }, nil } -func updateContainerLanguage(cl languagedetectionUtil.ContainersLanguages, container languagedetectionUtil.Container, languages string) { +func updateContainerLanguage(cl languagemodels.ContainersLanguages, container languagemodels.Container, languages string) { if _, found := cl[container]; !found { - cl[container] = make(languagedetectionUtil.LanguageSet) + cl[container] = make(languagemodels.LanguageSet) } for _, lang := range strings.Split(languages, ",") { - cl[container][languagedetectionUtil.Language(strings.TrimSpace(lang))] = struct{}{} + cl[container][languagemodels.LanguageName(strings.TrimSpace(lang))] = struct{}{} } } func (p deploymentParser) Parse(obj interface{}) workloadmeta.Entity { deployment := obj.(*appsv1.Deployment) - containerLanguages := make(languagedetectionUtil.ContainersLanguages) + containerLanguages := 
make(languagemodels.ContainersLanguages) for annotation, languages := range deployment.Annotations { - containerName, isInitContainer := languagedetectionUtil.ExtractContainerFromAnnotationKey(annotation) + containerName, isInitContainer := languagemodels.ExtractContainerFromAnnotationKey(annotation) if containerName != "" && languages != "" { updateContainerLanguage( containerLanguages, - languagedetectionUtil.Container{ + languagemodels.Container{ Name: containerName, Init: isInitContainer, }, diff --git a/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment_test.go b/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment_test.go index 6976efbc3bc2f4..616c2f443968f9 100644 --- a/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment_test.go +++ b/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers/deployment_test.go @@ -10,8 +10,6 @@ package kubernetesresourceparsers import ( "testing" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -53,16 +51,16 @@ func TestDeploymentParser_Parse(t *testing.T) { "internal.dd.datadoghq.com/init.nginx-cont.detected_langs": "go,java, python ", }, }, - InjectableLanguages: langUtil.ContainersLanguages{ - *langUtil.NewInitContainer("nginx-cont"): { - langUtil.Language(languagemodels.Go): {}, - langUtil.Language(languagemodels.Java): {}, - langUtil.Language(languagemodels.Python): {}, + InjectableLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewInitContainer("nginx-cont"): { + languagemodels.Go: {}, + languagemodels.Java: {}, + languagemodels.Python: {}, }, - *langUtil.NewContainer("nginx-cont"): { - langUtil.Language(languagemodels.Go): {}, - langUtil.Language(languagemodels.Java): {}, - langUtil.Language(languagemodels.Python): {}, + *languagemodels.NewContainer("nginx-cont"): { + languagemodels.Go: 
{}, + languagemodels.Java: {}, + languagemodels.Python: {}, }, }, }, @@ -110,7 +108,7 @@ func TestDeploymentParser_Parse(t *testing.T) { Env: "env", Service: "service", Version: "version", - InjectableLanguages: make(langUtil.ContainersLanguages), + InjectableLanguages: make(languagemodels.ContainersLanguages), }, deployment: &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ @@ -151,16 +149,16 @@ func TestDeploymentParser_Parse(t *testing.T) { "internal.dd.datadoghq.com/init.nginx-cont.detected_langs": "go,java, python ", }, }, - InjectableLanguages: langUtil.ContainersLanguages{ - *langUtil.NewInitContainer("nginx-cont"): { - langUtil.Language(languagemodels.Go): {}, - langUtil.Language(languagemodels.Java): {}, - langUtil.Language(languagemodels.Python): {}, + InjectableLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewInitContainer("nginx-cont"): { + languagemodels.Go: {}, + languagemodels.Java: {}, + languagemodels.Python: {}, }, - *langUtil.NewContainer("nginx-cont"): { - langUtil.Language(languagemodels.Go): {}, - langUtil.Language(languagemodels.Java): {}, - langUtil.Language(languagemodels.Python): {}, + *languagemodels.NewContainer("nginx-cont"): { + languagemodels.Go: {}, + languagemodels.Java: {}, + languagemodels.Python: {}, }, }, }, diff --git a/comp/core/workloadmeta/def/types.go b/comp/core/workloadmeta/def/types.go index 2207b3440a07ea..72034f42768b51 100644 --- a/comp/core/workloadmeta/def/types.go +++ b/comp/core/workloadmeta/def/types.go @@ -19,7 +19,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" ) @@ -880,11 +879,11 @@ type KubernetesDeployment struct { // InjectableLanguages indicate containers languages that can be injected by the admission controller // These languages are determined by 
parsing the deployment annotations - InjectableLanguages langUtil.ContainersLanguages + InjectableLanguages languagemodels.ContainersLanguages // DetectedLanguages languages indicate containers languages detected and reported by the language // detection server. - DetectedLanguages langUtil.ContainersLanguages + DetectedLanguages languagemodels.ContainersLanguages } // GetID implements Entity#GetID. @@ -920,7 +919,7 @@ func (d KubernetesDeployment) String(verbose bool) string { _, _ = fmt.Fprintln(&sb, "Service :", d.Service) _, _ = fmt.Fprintln(&sb, "Version :", d.Version) - langPrinter := func(containersLanguages langUtil.ContainersLanguages) { + langPrinter := func(containersLanguages languagemodels.ContainersLanguages) { initContainersInfo := make([]string, 0, len(containersLanguages)) containersInfo := make([]string, 0, len(containersLanguages)) diff --git a/comp/languagedetection/client/clientimpl/client.go b/comp/languagedetection/client/clientimpl/client.go index 590bd7260a3bb6..7dcc13d96a0910 100644 --- a/comp/languagedetection/client/clientimpl/client.go +++ b/comp/languagedetection/client/clientimpl/client.go @@ -11,18 +11,18 @@ import ( "sync" "time" + "go.uber.org/fx" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" clientComp "github.com/DataDog/datadog-agent/comp/languagedetection/client" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/option" - "go.uber.org/fx" ) const ( @@ -340,7 +340,7 @@ func (c *client) handleProcessEvent(processEvent workloadmeta.Event, isRetry boo 
podInfo := c.currentBatch.getOrAddPodInfo(pod.Name, pod.Namespace, &pod.Owners[0]) containerInfo := podInfo.getOrAddContainerInfo(containerName, isInitcontainer) - added := containerInfo.Add(langUtil.Language(process.Language.Name)) + added := containerInfo.Add(process.Language.Name) if added { c.freshlyUpdatedPods[pod.Name] = struct{}{} delete(c.processesWithoutPod, process.ContainerID) diff --git a/comp/languagedetection/client/clientimpl/client_test.go b/comp/languagedetection/client/clientimpl/client_test.go index a7436b4ced74cb..a2d128b7cb39d2 100644 --- a/comp/languagedetection/client/clientimpl/client_test.go +++ b/comp/languagedetection/client/clientimpl/client_test.go @@ -31,8 +31,6 @@ import ( pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/option" - - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" ) type MockDCAClient struct { @@ -116,14 +114,14 @@ func TestClientEnabled(t *testing.T) { func TestClientSend(t *testing.T) { client, respCh := newTestClient(t) - containers := langUtil.ContainersLanguages{ - langUtil.Container{ + containers := languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "java-cont", Init: false, }: { "java": {}, }, - langUtil.Container{ + languagemodels.Container{ Name: "go-cont", Init: true, }: { @@ -170,14 +168,14 @@ func TestClientSend(t *testing.T) { func TestClientSendFreshPods(t *testing.T) { client, _ := newTestClient(t) - containers := langUtil.ContainersLanguages{ - langUtil.Container{ + containers := languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "java-cont", Init: false, }: { "java": {}, }, - langUtil.Container{ + languagemodels.Container{ Name: "go-cont", Init: true, }: { @@ -361,15 +359,15 @@ func TestClientProcessEvent_EveryEntityStored(t *testing.T) { batch{ "nginx-pod-name": { namespace: "nginx-pod-namespace", - containerInfo: 
langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "nginx-cont-name", Init: false, }: { "java": {}, }, - langUtil.Container{ + languagemodels.Container{ Name: "nginx-cont-name", Init: true, }: { @@ -551,14 +549,14 @@ func TestClientProcessEvent_PodMissing(t *testing.T) { batch{ "nginx-pod-name": { namespace: "nginx-pod-namespace", - containerInfo: langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "nginx-cont-name", Init: false, }: { "java": {}, }, - langUtil.Container{ + languagemodels.Container{ Name: "nginx-cont-name", Init: true, }: { @@ -970,8 +968,8 @@ func TestRun(t *testing.T) { expectedBatch := batch{ "nginx-pod-name1": { namespace: "nginx-pod-namespace1", - containerInfo: langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "nginx-cont-name1", Init: false, }: {"java": {}}, @@ -1023,8 +1021,8 @@ func TestRun(t *testing.T) { b := batch{ "nginx-pod-name2": { namespace: "nginx-pod-namespace2", - containerInfo: langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "nginx-cont-name2", Init: false, }: {"go": {}}, @@ -1037,8 +1035,8 @@ func TestRun(t *testing.T) { }, "nginx-pod-name1": { namespace: "nginx-pod-namespace1", - containerInfo: langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "nginx-cont-name1", Init: false, }: {"java": {}}, @@ -1051,8 +1049,8 @@ func TestRun(t *testing.T) { }, "python-pod-name3": { namespace: "python-pod-namespace3", - containerInfo: langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "python-cont-name3", Init: false, }: 
{"python": {}}, @@ -1094,8 +1092,8 @@ func TestRun(t *testing.T) { b = batch{ "nginx-pod-name1": { namespace: "nginx-pod-namespace1", - containerInfo: langUtil.ContainersLanguages{ - langUtil.Container{ + containerInfo: languagemodels.ContainersLanguages{ + languagemodels.Container{ Name: "nginx-cont-name1", Init: false, }: {"java": {}}, @@ -1124,22 +1122,22 @@ func protoToBatch(protoMessage *pbgo.ParentLanguageAnnotationRequest) batch { res := make(batch) for _, podDetail := range protoMessage.PodDetails { - cInfo := make(langUtil.ContainersLanguages) + cInfo := make(languagemodels.ContainersLanguages) for _, container := range podDetail.ContainerDetails { - languageSet := make(langUtil.LanguageSet) + languageSet := make(languagemodels.LanguageSet) for _, lang := range container.Languages { - languageSet.Add(langUtil.Language(lang.Name)) + languageSet.Add(languagemodels.LanguageName(lang.Name)) } - cInfo[*langUtil.NewContainer(container.ContainerName)] = languageSet + cInfo[*languagemodels.NewContainer(container.ContainerName)] = languageSet } for _, container := range podDetail.InitContainerDetails { - languageSet := make(langUtil.LanguageSet) + languageSet := make(languagemodels.LanguageSet) for _, lang := range container.Languages { - languageSet.Add(langUtil.Language(lang.Name)) + languageSet.Add(languagemodels.LanguageName(lang.Name)) } - cInfo[*langUtil.NewContainer(container.ContainerName)] = languageSet + cInfo[*languagemodels.NewContainer(container.ContainerName)] = languageSet } podInfo := podInfo{ diff --git a/comp/languagedetection/client/clientimpl/util.go b/comp/languagedetection/client/clientimpl/util.go index 06aa3a6f2f5f7e..61582275c897b8 100644 --- a/comp/languagedetection/client/clientimpl/util.go +++ b/comp/languagedetection/client/clientimpl/util.go @@ -10,7 +10,7 @@ import ( "time" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" + 
"github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) @@ -28,7 +28,7 @@ func (b batch) getOrAddPodInfo(podName, podnamespace string, ownerRef *workloadm } b[podName] = &podInfo{ namespace: podnamespace, - containerInfo: make(langUtil.ContainersLanguages), + containerInfo: make(languagemodels.ContainersLanguages), ownerRef: ownerRef, } return b[podName] @@ -36,7 +36,7 @@ func (b batch) getOrAddPodInfo(podName, podnamespace string, ownerRef *workloadm type podInfo struct { namespace string - containerInfo langUtil.ContainersLanguages + containerInfo languagemodels.ContainersLanguages ownerRef *workloadmeta.KubernetesPodOwner } @@ -55,10 +55,10 @@ func (p *podInfo) toProto(podName string) *pbgo.PodLanguageDetails { } } -func (p *podInfo) getOrAddContainerInfo(containerName string, isInitContainer bool) langUtil.LanguageSet { +func (p *podInfo) getOrAddContainerInfo(containerName string, isInitContainer bool) languagemodels.LanguageSet { cInfo := p.containerInfo - container := langUtil.Container{ + container := languagemodels.Container{ Name: containerName, Init: isInitContainer, } @@ -66,7 +66,7 @@ func (p *podInfo) getOrAddContainerInfo(containerName string, isInitContainer bo return languageSet } - cInfo[container] = make(langUtil.LanguageSet) + cInfo[container] = make(languagemodels.LanguageSet) return cInfo[container] } diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go index c57f5cc83790c1..f0e5be0cd4f26e 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" 
"github.com/DataDog/datadog-agent/pkg/config/model" - "github.com/DataDog/datadog-agent/pkg/languagedetection/util" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -3597,10 +3597,10 @@ func mustWebhook(t *testing.T, wmeta workloadmeta.Component, ddConfig config.Com return webhook } -func languageSetOf(languages ...string) util.LanguageSet { - set := util.LanguageSet{} +func languageSetOf(languages ...string) languagemodels.LanguageSet { + set := languagemodels.LanguageSet{} for _, l := range languages { - _ = set.Add(util.Language(l)) + _ = set.Add(languagemodels.LanguageName(l)) } return set } diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_util_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_util_test.go index 245a2e6594c349..44b0fb843cf06b 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_util_test.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_util_test.go @@ -22,7 +22,7 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -106,9 +106,9 @@ func TestGetLibListFromDeploymentAnnotations(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "default/dummy", }, - InjectableLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("container-1"): {"java": {}, "js": {}}, - *langUtil.NewContainer("container-2"): {"python": {}}, + InjectableLanguages: 
languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("container-1"): {"java": {}, "js": {}}, + *languagemodels.NewContainer("container-2"): {"python": {}}, }, }) @@ -117,9 +117,9 @@ func TestGetLibListFromDeploymentAnnotations(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "custom/dummy", }, - InjectableLanguages: langUtil.ContainersLanguages{ - *langUtil.NewContainer("container-1"): {"ruby": {}, "python": {}}, - *langUtil.NewContainer("container-2"): {"java": {}}, + InjectableLanguages: languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("container-1"): {"ruby": {}, "python": {}}, + *languagemodels.NewContainer("container-2"): {"java": {}}, }, }) diff --git a/pkg/clusteragent/admission/mutate/common/test_utils.go b/pkg/clusteragent/admission/mutate/common/test_utils.go index b29aa8ca36d0c4..24591adf7f2992 100644 --- a/pkg/clusteragent/admission/mutate/common/test_utils.go +++ b/pkg/clusteragent/admission/mutate/common/test_utils.go @@ -24,7 +24,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" - "github.com/DataDog/datadog-agent/pkg/languagedetection/util" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -255,7 +255,7 @@ type MockDeployment struct { DeploymentName string Namespace string IsInitContainer bool - Languages util.LanguageSet + Languages languagemodels.LanguageSet } // FakeStoreWithDeployment sets up a fake workloadmeta with the given @@ -269,11 +269,11 @@ func FakeStoreWithDeployment(t *testing.T, deployments []MockDeployment) workloa )) for _, d := range deployments { - langSet := util.LanguageSet{} + langSet := languagemodels.LanguageSet{} for lang := range d.Languages { langSet.Add(lang) } - container := util.Container{ + 
container := languagemodels.Container{ Name: d.ContainerName, Init: d.IsInitContainer, } @@ -283,7 +283,7 @@ func FakeStoreWithDeployment(t *testing.T, deployments []MockDeployment) workloa Kind: workloadmeta.KindKubernetesDeployment, ID: fmt.Sprintf("%s/%s", d.Namespace, d.DeploymentName), }, - InjectableLanguages: util.ContainersLanguages{ + InjectableLanguages: languagemodels.ContainersLanguages{ container: langSet, }, }) diff --git a/pkg/clusteragent/languagedetection/patcher.go b/pkg/clusteragent/languagedetection/patcher.go index 2f34546a3dd239..1a02a55a74b87d 100644 --- a/pkg/clusteragent/languagedetection/patcher.go +++ b/pkg/clusteragent/languagedetection/patcher.go @@ -27,6 +27,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -262,7 +263,7 @@ func (lp *languagePatcher) handleDeployment(ctx context.Context, owner langUtil. 
return err } -func (lp *languagePatcher) generateAnnotationsPatch(currentLangs, newLangs langUtil.ContainersLanguages) map[string]interface{} { +func (lp *languagePatcher) generateAnnotationsPatch(currentLangs, newLangs languagemodels.ContainersLanguages) map[string]interface{} { currentAnnotations := currentLangs.ToAnnotations() targetAnnotations := newLangs.ToAnnotations() diff --git a/pkg/clusteragent/languagedetection/patcher_test.go b/pkg/clusteragent/languagedetection/patcher_test.go index b8716bfab0fd7f..7e264a76165e83 100644 --- a/pkg/clusteragent/languagedetection/patcher_test.go +++ b/pkg/clusteragent/languagedetection/patcher_test.go @@ -31,6 +31,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -130,9 +131,9 @@ func TestRun(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "test-namespace/test-deployment", }, - InjectableLanguages: map[langUtil.Container]langUtil.LanguageSet{ - *langUtil.NewContainer("some-cont"): {"java": {}}, - *langUtil.NewContainer("stale-cont"): {"java": {}, "python": {}}, + InjectableLanguages: map[languagemodels.Container]languagemodels.LanguageSet{ + *languagemodels.NewContainer("some-cont"): {"java": {}}, + *languagemodels.NewContainer("stale-cont"): {"java": {}, "python": {}}, }, }}) @@ -143,12 +144,12 @@ func TestRun(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "test-namespace/" + longContNameDeploymentName, }, - DetectedLanguages: map[langUtil.Container]langUtil.LanguageSet{ - *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, - *langUtil.NewInitContainer("python-ruby-init"): 
{"ruby": {}, "python": {}}, + DetectedLanguages: map[languagemodels.Container]languagemodels.LanguageSet{ + *languagemodels.NewContainer("some-cont"): {"java": {}, "python": {}}, + *languagemodels.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, // The max allowed annotation key name length in kubernetes is 63 // To test that validation works, we are using a container name of length 69 - *langUtil.NewInitContainer(strings.Repeat("x", 69)): {"ruby": {}, "python": {}}, + *languagemodels.NewInitContainer(strings.Repeat("x", 69)): {"ruby": {}, "python": {}}, }, }, } @@ -162,9 +163,9 @@ func TestRun(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "test-namespace/test-deployment", }, - DetectedLanguages: map[langUtil.Container]langUtil.LanguageSet{ - *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, - *langUtil.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, + DetectedLanguages: map[languagemodels.Container]languagemodels.LanguageSet{ + *languagemodels.NewContainer("some-cont"): {"java": {}, "python": {}}, + *languagemodels.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, }, }, } @@ -219,9 +220,9 @@ func TestRun(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: "test-namespace/test-deployment", }, - InjectableLanguages: map[langUtil.Container]langUtil.LanguageSet{ - *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, - *langUtil.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, + InjectableLanguages: map[languagemodels.Container]languagemodels.LanguageSet{ + *languagemodels.NewContainer("some-cont"): {"java": {}, "python": {}}, + *languagemodels.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, }, }, } @@ -235,12 +236,12 @@ func TestRun(t *testing.T) { return false } - return reflect.DeepEqual(deployment.InjectableLanguages, langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, - 
*langUtil.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, - }) && reflect.DeepEqual(deployment.DetectedLanguages, langUtil.ContainersLanguages{ - *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, - *langUtil.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, + return reflect.DeepEqual(deployment.InjectableLanguages, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-cont"): {"java": {}, "python": {}}, + *languagemodels.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, + }) && reflect.DeepEqual(deployment.DetectedLanguages, languagemodels.ContainersLanguages{ + *languagemodels.NewContainer("some-cont"): {"java": {}, "python": {}}, + *languagemodels.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, }) }, eventuallyTestTimeout, @@ -336,12 +337,12 @@ func TestPatcherRetriesFailedPatches(t *testing.T) { Kind: workloadmeta.KindKubernetesDeployment, ID: ns + "/" + deploymentName, }, - DetectedLanguages: map[langUtil.Container]langUtil.LanguageSet{ - *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, - *langUtil.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, + DetectedLanguages: map[languagemodels.Container]languagemodels.LanguageSet{ + *languagemodels.NewContainer("some-cont"): {"java": {}, "python": {}}, + *languagemodels.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, // The max allowed annotation key name length in kubernetes is 63 // To test that failed patches are retried, we are using a container name of length 69 - *langUtil.NewInitContainer(strings.Repeat("x", 69)): {"ruby": {}, "python": {}}, + *languagemodels.NewInitContainer(strings.Repeat("x", 69)): {"ruby": {}, "python": {}}, }, }, } diff --git a/pkg/languagedetection/util/README.md b/pkg/languagedetection/languagemodels/README.md similarity index 100% rename from pkg/languagedetection/util/README.md rename to pkg/languagedetection/languagemodels/README.md 
diff --git a/pkg/languagedetection/util/annotations.go b/pkg/languagedetection/languagemodels/annotations.go similarity index 91% rename from pkg/languagedetection/util/annotations.go rename to pkg/languagedetection/languagemodels/annotations.go index c99d4135fff8c2..c54345920dfcbc 100644 --- a/pkg/languagedetection/util/annotations.go +++ b/pkg/languagedetection/languagemodels/annotations.go @@ -3,8 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -// Package util provides util type definitions and helper methods for the language detection client and handler -package util +package languagemodels import ( "regexp" diff --git a/pkg/languagedetection/util/annotations_test.go b/pkg/languagedetection/languagemodels/annotations_test.go similarity index 98% rename from pkg/languagedetection/util/annotations_test.go rename to pkg/languagedetection/languagemodels/annotations_test.go index 39730304e3d78e..a699ab24af25a7 100644 --- a/pkg/languagedetection/util/annotations_test.go +++ b/pkg/languagedetection/languagemodels/annotations_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -package util +package languagemodels import ( "testing" diff --git a/pkg/languagedetection/util/containerlanguages.go b/pkg/languagedetection/languagemodels/containerlanguages.go similarity index 99% rename from pkg/languagedetection/util/containerlanguages.go rename to pkg/languagedetection/languagemodels/containerlanguages.go index 10f17f191c3f1b..73b9b084c6055b 100644 --- a/pkg/languagedetection/util/containerlanguages.go +++ b/pkg/languagedetection/languagemodels/containerlanguages.go @@ -3,14 +3,15 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. 
-package util +package languagemodels import ( "fmt" - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "reflect" "sort" "strings" + + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) //////////////////////////////// diff --git a/pkg/languagedetection/util/containerlanguages_test.go b/pkg/languagedetection/languagemodels/containerlanguages_test.go similarity index 99% rename from pkg/languagedetection/util/containerlanguages_test.go rename to pkg/languagedetection/languagemodels/containerlanguages_test.go index e5b0c9d65ae780..8c4a490e653ea3 100644 --- a/pkg/languagedetection/util/containerlanguages_test.go +++ b/pkg/languagedetection/languagemodels/containerlanguages_test.go @@ -3,14 +3,15 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -package util +package languagemodels import ( "fmt" - "github.com/stretchr/testify/assert" "reflect" "testing" "time" + + "github.com/stretchr/testify/assert" ) ////////////////////////////////////////// diff --git a/pkg/languagedetection/util/languageset.go b/pkg/languagedetection/languagemodels/languageset.go similarity index 86% rename from pkg/languagedetection/util/languageset.go rename to pkg/languagedetection/languagemodels/languageset.go index 4e83a978738a95..99eb522a89786e 100644 --- a/pkg/languagedetection/util/languageset.go +++ b/pkg/languagedetection/languagemodels/languageset.go @@ -3,16 +3,14 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. 
-package util +package languagemodels import ( - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "reflect" "time" -) -// Language represents a language name -type Language string + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" +) //////////////////////////////// // // @@ -21,11 +19,11 @@ type Language string //////////////////////////////// // LanguageSet represents a set of languages -type LanguageSet map[Language]struct{} +type LanguageSet map[LanguageName]struct{} // Add adds a new language to the language set // returns false if the language is already included in the set, and true otherwise -func (s LanguageSet) Add(language Language) bool { +func (s LanguageSet) Add(language LanguageName) bool { _, found := s[language] s[language] = struct{}{} return !found @@ -49,7 +47,7 @@ func (s LanguageSet) ToProto() []*pbgo.Language { //////////////////////////////// // TimedLanguageSet handles storing sets of languages along with their expiration times -type TimedLanguageSet map[Language]time.Time +type TimedLanguageSet map[LanguageName]time.Time // RemoveExpired removes all expired languages from the set // Returns true if at least one language is expired and removed @@ -66,20 +64,20 @@ func (s TimedLanguageSet) RemoveExpired() bool { // Add adds a new language to the language set with an expiration time // returns false if the language is already included in the set, and true otherwise -func (s TimedLanguageSet) Add(language Language, expiration time.Time) bool { +func (s TimedLanguageSet) Add(language LanguageName, expiration time.Time) bool { _, found := s[language] s[language] = expiration return !found } // Has returns whether the set contains a specific language -func (s TimedLanguageSet) Has(language Language) bool { +func (s TimedLanguageSet) Has(language LanguageName) bool { _, found := s[language] return found } // Remove deletes a language from the language set -func (s TimedLanguageSet) Remove(language Language) { +func (s 
TimedLanguageSet) Remove(language LanguageName) { delete(s, language) } diff --git a/pkg/languagedetection/util/languageset_test.go b/pkg/languagedetection/languagemodels/languageset_test.go similarity index 97% rename from pkg/languagedetection/util/languageset_test.go rename to pkg/languagedetection/languagemodels/languageset_test.go index dfabf27ba8855d..ee68175c14fe6e 100644 --- a/pkg/languagedetection/util/languageset_test.go +++ b/pkg/languagedetection/languagemodels/languageset_test.go @@ -3,14 +3,15 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -package util +package languagemodels import ( "fmt" - "github.com/stretchr/testify/assert" "reflect" "testing" "time" + + "github.com/stretchr/testify/assert" ) //////////////////////////////// @@ -99,19 +100,19 @@ func TestHas(t *testing.T) { tests := []struct { name string baseSet TimedLanguageSet - target Language + target LanguageName shouldHave bool }{ { name: "has existing item", baseSet: TimedLanguageSet{"java": {}}, - target: "java", + target: Java, shouldHave: true, }, { name: "should not have missing item", baseSet: TimedLanguageSet{"java": {}}, - target: "cpp", + target: Python, shouldHave: false, }, } diff --git a/pkg/languagedetection/util/doc.go b/pkg/languagedetection/util/doc.go index d9cd72953c6eb8..9d8501f4b692df 100644 --- a/pkg/languagedetection/util/doc.go +++ b/pkg/languagedetection/util/doc.go @@ -8,6 +8,6 @@ Package util implements utility functions and types for language detection featu For more information about the language detection and library injection feature, refer to [this] document. 
-[this]: https://github.com/DataDog/datadog-agent/blob/main/pkg/languagedetection/util/README.md +[this]: https://github.com/DataDog/datadog-agent/blob/main/pkg/languagedetection/languagemodels/README.md */ package util From c2e3683dfedef4789e5274517c49eef76a6c468d Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Fri, 31 Jan 2025 13:55:38 +0100 Subject: [PATCH 91/97] Drop python tests from the `site-packages` (#33595) --- omnibus/config/software/datadog-agent-integrations-py3.rb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/omnibus/config/software/datadog-agent-integrations-py3.rb b/omnibus/config/software/datadog-agent-integrations-py3.rb index 0b88f6fd8f5d1d..48902dd755a3fc 100644 --- a/omnibus/config/software/datadog-agent-integrations-py3.rb +++ b/omnibus/config/software/datadog-agent-integrations-py3.rb @@ -227,8 +227,14 @@ # Removing tests that don't need to be shipped in the embedded folder if windows_target? delete "#{python_3_embedded}/Lib/site-packages/Cryptodome/SelfTest/" + delete "#{python_3_embedded}/Lib/site-packages/openstack/tests/" + delete "#{python_3_embedded}/Lib/site-packages/psutil/tests/" + delete "#{python_3_embedded}/Lib/site-packages/test/" # cm-client else delete "#{install_dir}/embedded/lib/python#{python_version}/site-packages/Cryptodome/SelfTest/" + delete "#{install_dir}/embedded/lib/python#{python_version}/site-packages/openstack/tests/" + delete "#{install_dir}/embedded/lib/python#{python_version}/site-packages/psutil/tests/" + delete "#{install_dir}/embedded/lib/python#{python_version}/site-packages/test/" # cm-client end # Ship `requirements-agent-release.txt` file containing the versions of every check shipped with the agent From 04e2e0a99cf071292de4d5015811f77813cdee39 Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Fri, 31 Jan 2025 14:40:17 +0100 Subject: [PATCH 92/97] Update the create-release-branches task to create the release branch in the agent on a specific commit (#33603) --- tasks/release.py | 14 
++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tasks/release.py b/tasks/release.py index 10e148f6c3ab8c..d8b75a34129666 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -667,7 +667,6 @@ def create_and_update_release_branch( """ def _main(): - ctx.run("git pull") print(color_message(f"Branching out to {release_branch}", "bold")) ctx.run(f"git checkout -b {release_branch}") @@ -687,8 +686,7 @@ def _main(): # Perform branch out in all required repositories print(color_message(f"Working repository: {repo}", "bold")) if repo == 'datadog-agent': - with agent_context(ctx, base_branch or get_default_branch(major=get_version_major(release_branch))): - _main() + _main() else: with ctx.cd(f"{base_directory}/{repo}"): # Step 1 - Create a local branch out from the default branch @@ -697,13 +695,16 @@ def _main(): or ctx.run(f"git remote show {upstream} | grep \"HEAD branch\" | sed 's/.*: //'").stdout.strip() ) ctx.run(f"git checkout {main_branch}") + ctx.run("git pull") _main() # TODO: unfreeze is the former name of this task, kept for backward compatibility. Remove in a few weeks. @task(help={'upstream': "Remote repository name (default 'origin')"}, aliases=["unfreeze"]) -def create_release_branches(ctx, base_directory="~/dd", major_version: int = 7, upstream="origin", check_state=True): +def create_release_branches( + ctx, commit, base_directory="~/dd", major_version: int = 7, upstream="origin", check_state=True +): """Create and push release branches in Agent repositories and update them. That includes: @@ -712,11 +713,12 @@ def create_release_branches(ctx, base_directory="~/dd", major_version: int = 7, - updates entries in .gitlab-ci.yml and .gitlab/notify/notify.yml which depend on local branch name Args: + commit: the commit on which the branch should be created (usually the one before the milestone bump) base_directory: Path to the directory where dd repos are cloned, defaults to ~/dd, but can be overwritten. 
use_worktree: If True, will go to datadog-agent-worktree instead of datadog-agent. Notes: - This requires a Github token (either in the GITHUB_TOKEN environment variable, or in the MacOS keychain), + This requires a GitHub token (either in the GITHUB_TOKEN environment variable, or in the MacOS keychain), with 'repo' permissions. This also requires that there are no local uncommitted changes, that the current branch is 'main' or the release branch, and that no branch named 'release/' already exists locally or upstream. @@ -731,7 +733,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_version: int = 7, # Strings with proper branch/tag names release_branch = current.branch() - with agent_context(ctx, get_default_branch()): + with agent_context(ctx, commit=commit): # Step 0: checks ctx.run("git fetch") From 8b33915cac7ef452687f33809838e10a72f26594 Mon Sep 17 00:00:00 2001 From: Justin Lesko Date: Fri, 31 Jan 2025 09:00:39 -0500 Subject: [PATCH 93/97] [CONTINT-4545] Fix docker image layer digests from the docker collector (#33384) --- .../collectors/internal/docker/docker.go | 37 +++- .../collectors/internal/docker/docker_test.go | 189 ++++++++++++++++++ ...-image-layer-digests-ab42e0cd0d2bd16d.yaml | 11 + 3 files changed, 232 insertions(+), 5 deletions(-) create mode 100644 comp/core/workloadmeta/collectors/internal/docker/docker_test.go create mode 100644 releasenotes/notes/fix-missing-docker-runtime-image-layer-digests-ab42e0cd0d2bd16d.yaml diff --git a/comp/core/workloadmeta/collectors/internal/docker/docker.go b/comp/core/workloadmeta/collectors/internal/docker/docker.go index 2dabd74dee5acf..c952e549638bb4 100644 --- a/comp/core/workloadmeta/collectors/internal/docker/docker.go +++ b/comp/core/workloadmeta/collectors/internal/docker/docker.go @@ -646,26 +646,53 @@ func (c *collector) getImageMetadata(ctx context.Context, imageID string, newSBO OSVersion: imgInspect.OsVersion, Architecture: imgInspect.Architecture, Variant: imgInspect.Variant, - 
Layers: layersFromDockerHistory(imageHistory), + Layers: layersFromDockerHistoryAndInspect(imageHistory, imgInspect), SBOM: sbom, }, nil } -func layersFromDockerHistory(history []image.HistoryResponseItem) []workloadmeta.ContainerImageLayer { +// it has been observed that docker can return layers that are missing all metadata when inherited from a base container +func isInheritedLayer(layer image.HistoryResponseItem) bool { + return layer.CreatedBy == "" && layer.Size == 0 +} + +func layersFromDockerHistoryAndInspect(history []image.HistoryResponseItem, inspect types.ImageInspect) []workloadmeta.ContainerImageLayer { var layers []workloadmeta.ContainerImageLayer - // Docker returns the layers in reverse-chronological order + // Sanity check our current assumption that there cannot be more RootFS layer IDs than history layers + if len(inspect.RootFS.Layers) > len(history) { + log.Warn("The number of RootFS layers exceeded the number of history layers") + return layers + } + + // inspectIdx tracks the current RootFS layer ID index (in Docker, this corresponds to the Diff ID of a layer) + // NOTE: Docker returns the RootFS layers in chronological order + inspectIdx := 0 + + // Docker returns the history layers in reverse-chronological order for i := len(history) - 1; i >= 0; i-- { created := time.Unix(history[i].Created, 0) + isEmptyLayer := history[i].Size == 0 + isInheritedLayer := isInheritedLayer(history[i]) + + digest := "" + if isInheritedLayer || !isEmptyLayer { + if isInheritedLayer { + log.Debugf("detected an inherited layer for image ID: \"%s\", assigning it digest: \"%s\"", inspect.ID, inspect.RootFS.Layers[inspectIdx]) + } + digest = inspect.RootFS.Layers[inspectIdx] + inspectIdx++ + } + layer := workloadmeta.ContainerImageLayer{ - Digest: history[i].ID, + Digest: digest, SizeBytes: history[i].Size, History: &v1.History{ Created: &created, CreatedBy: history[i].CreatedBy, Comment: history[i].Comment, - EmptyLayer: history[i].Size == 0, + EmptyLayer: 
isEmptyLayer, }, } diff --git a/comp/core/workloadmeta/collectors/internal/docker/docker_test.go b/comp/core/workloadmeta/collectors/internal/docker/docker_test.go new file mode 100644 index 00000000000000..b234ce5184380e --- /dev/null +++ b/comp/core/workloadmeta/collectors/internal/docker/docker_test.go @@ -0,0 +1,189 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build docker + +package docker + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" +) + +func Test_LayersFromDockerHistoryAndInspect(t *testing.T) { + var emptySize int64 + var noDiffCmd = "ENV var=dummy" + + var nonEmptySize int64 = 1 + var cmd = "COPY dummy.sh ." 
+ + var baseTimeUnix int64 + var baseTime = time.Unix(baseTimeUnix, 0) + + var layerID = "dummy id" + + tests := []struct { + name string + history []image.HistoryResponseItem + inspect types.ImageInspect + expected []workloadmeta.ContainerImageLayer + }{ + { + name: "Layer with CreatedBy and positive Size is assigned a digest", + history: []image.HistoryResponseItem{ + { + Size: nonEmptySize, + CreatedBy: cmd, + Created: baseTimeUnix, + }, + }, + inspect: types.ImageInspect{ + RootFS: types.RootFS{ + Layers: []string{layerID}, + }, + }, + expected: []workloadmeta.ContainerImageLayer{ + { + Digest: layerID, + SizeBytes: nonEmptySize, + History: &v1.History{ + Created: &baseTime, + CreatedBy: cmd, + EmptyLayer: false, + }, + }, + }, + }, + { + name: "Inherited layer with no CreatedBy and no Size is detected and is assigned a digest", + history: []image.HistoryResponseItem{ + { + Size: emptySize, + Created: baseTimeUnix, + }, + }, + inspect: types.ImageInspect{ + RootFS: types.RootFS{ + Layers: []string{layerID}, + }, + }, + expected: []workloadmeta.ContainerImageLayer{ + { + Digest: layerID, + SizeBytes: emptySize, + History: &v1.History{ + Created: &baseTime, + EmptyLayer: true, + }, + }, + }, + }, + { + name: "Layer with CreatedBy and empty Size is NOT assigned a digest", + history: []image.HistoryResponseItem{ + { + Size: emptySize, + CreatedBy: noDiffCmd, + Created: baseTimeUnix, + }, + }, + inspect: types.ImageInspect{ + RootFS: types.RootFS{ + Layers: []string{layerID}, + }, + }, + expected: []workloadmeta.ContainerImageLayer{ + { + SizeBytes: emptySize, + History: &v1.History{ + CreatedBy: noDiffCmd, + Created: &baseTime, + EmptyLayer: true, + }, + }, + }, + }, + { + name: "Mix of layers with and without digests are merged in the proper order", + history: []image.HistoryResponseItem{ + { // "2" in the expected field + Size: nonEmptySize, + Created: baseTimeUnix, + CreatedBy: cmd, + }, + { + Size: emptySize, + Created: baseTimeUnix, + CreatedBy: noDiffCmd, + 
}, + { // "1" in the expected field + Size: emptySize, + Created: baseTimeUnix, + }, + }, + inspect: types.ImageInspect{ + RootFS: types.RootFS{ + Layers: []string{"1", "2"}, + }, + }, + expected: []workloadmeta.ContainerImageLayer{ + { + Digest: "1", + SizeBytes: emptySize, + History: &v1.History{ + Created: &baseTime, + EmptyLayer: true, + }, + }, + { + SizeBytes: emptySize, + History: &v1.History{ + Created: &baseTime, + CreatedBy: noDiffCmd, + EmptyLayer: true, + }, + }, + { + Digest: "2", + SizeBytes: nonEmptySize, + History: &v1.History{ + Created: &baseTime, + CreatedBy: cmd, + EmptyLayer: false, + }, + }, + }, + }, + { + name: "Number of inspect layers exceeds history layers breaks our assumption and results in no layers returned", + history: []image.HistoryResponseItem{ + { + Size: nonEmptySize, + CreatedBy: cmd, + Created: baseTimeUnix, + }, + }, + inspect: types.ImageInspect{ + RootFS: types.RootFS{ + Layers: []string{layerID, layerID}, + }, + }, + expected: []workloadmeta.ContainerImageLayer{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + layers := layersFromDockerHistoryAndInspect(tt.history, tt.inspect) + assert.ElementsMatchf(t, tt.expected, layers, "Expected layers and actual layers returned do not match") + }) + } +} diff --git a/releasenotes/notes/fix-missing-docker-runtime-image-layer-digests-ab42e0cd0d2bd16d.yaml b/releasenotes/notes/fix-missing-docker-runtime-image-layer-digests-ab42e0cd0d2bd16d.yaml new file mode 100644 index 00000000000000..9206161625778d --- /dev/null +++ b/releasenotes/notes/fix-missing-docker-runtime-image-layer-digests-ab42e0cd0d2bd16d.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. 
+# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Image layer digests will no longer report as "" from Docker runtimes. From 5158a1cbb4dd0d78607cad7672df7e5868db9795 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Fri, 31 Jan 2025 15:59:00 +0100 Subject: [PATCH 94/97] [EBPF] gpu: configure name mappings for eBPF maps (#33621) --- pkg/gpu/probe.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/gpu/probe.go b/pkg/gpu/probe.go index a3246848e1303d..1aa994a8bbd2d8 100644 --- a/pkg/gpu/probe.go +++ b/pkg/gpu/probe.go @@ -188,6 +188,7 @@ func (p *Probe) start() error { if err := p.m.Start(); err != nil { return fmt.Errorf("failed to start manager: %w", err) } + ddebpf.AddNameMappings(p.m.Manager, gpuModuleName) if err := p.attacher.Start(); err != nil { return fmt.Errorf("error starting uprobes attacher: %w", err) @@ -199,6 +200,7 @@ func (p *Probe) start() error { func (p *Probe) Close() { p.attacher.Stop() _ = p.m.Stop(manager.CleanAll) + ddebpf.ClearNameMappings(gpuModuleName) p.consumer.Stop() p.eventHandler.Stop() } From 42efd12271deb20790baf04c07f2b49643e933f0 Mon Sep 17 00:00:00 2001 From: Jaime Fullaondo Date: Fri, 31 Jan 2025 16:08:05 +0100 Subject: [PATCH 95/97] [codeowners] adding new opentelemetry-agent team (#33575) --- .github/CODEOWNERS | 28 +++++++++---------- comp/README.md | 2 +- comp/otelcol/bundle.go | 2 +- .../collector-contrib/def/component.go | 2 +- comp/otelcol/collector/def/component.go | 2 +- comp/otelcol/collector/fx-pipeline/fx.go | 2 +- comp/otelcol/collector/fx/fx.go | 2 +- comp/otelcol/converter/def/component.go | 2 +- .../otelcol/ddflareextension/def/component.go | 2 +- comp/otelcol/logsagentpipeline/component.go | 2 +- 10 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1246d2cb2f9da8..dbfcf1f1d1c191 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -62,7 +62,7 @@ 
/.github/workflows/go-update-commenter.yml @DataDog/agent-runtimes /.github/workflows/update_dependencies.yml @DataDog/agent-runtimes /.github/workflows/buildimages-update.yml @DataDog/agent-delivery @DataDog/agent-runtimes -/.github/workflows/collector-generate-and-update.yml @DataDog/opentelemetry +/.github/workflows/collector-generate-and-update.yml @DataDog/opentelemetry @DataDog/opentelemetry-agent /.run @DataDog/agent-devx-loops /.run/docker/ @DataDog/container-integrations @DataDog/container-platform @@ -98,7 +98,7 @@ /.gitlab/integration_test/dogstatsd.yml @DataDog/agent-devx-infra @DataDog/agent-metrics /.gitlab/integration_test/include.yml @DataDog/agent-devx-infra /.gitlab/integration_test/linux.yml @DataDog/agent-devx-infra -/.gitlab/integration_test/otel.yml @DataDog/agent-devx-infra @DataDog/opentelemetry +/.gitlab/integration_test/otel.yml @DataDog/agent-devx-infra @DataDog/opentelemetry @DataDog/opentelemetry-agent /.gitlab/internal_image_deploy/internal_image_deploy.yml @DataDog/agent-delivery /.gitlab/internal_kubernetes_deploy/include.yml @DataDog/agent-devx-infra /.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @DataDog/agent-delivery @@ -115,7 +115,7 @@ /.gitlab/binary_build/cluster_agent_cloudfoundry.yml @DataDog/agent-integrations @DataDog/agent-delivery /.gitlab/binary_build/cluster_agent.yml @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/binary_build/fakeintake.yml @DataDog/agent-devx-loops -/.gitlab/binary_build/otel_agent.yml @DataDog/opentelemetry @DataDog/agent-delivery +/.gitlab/binary_build/otel_agent.yml @DataDog/opentelemetry @DataDog/opentelemetry-agent @DataDog/agent-delivery /.gitlab/binary_build/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-delivery /.gitlab/binary_build/system_probe.yml @DataDog/ebpf-platform @DataDog/agent-delivery /.gitlab/binary_build/windows.yml @DataDog/agent-delivery @DataDog/windows-agent @@ -218,7 +218,7 @@ 
/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @DataDog/agent-integrations /cmd/cws-instrumentation/ @DataDog/agent-security /cmd/dogstatsd/ @DataDog/agent-metrics -/cmd/otel-agent/ @DataDog/opentelemetry +/cmd/otel-agent/ @DataDog/opentelemetry @DataDog/opentelemetry-agent /cmd/process-agent/ @DataDog/container-intake /cmd/serverless/ @DataDog/serverless @Datadog/serverless-aws /cmd/serverless-init/ @DataDog/serverless @@ -251,7 +251,7 @@ /Dockerfiles/agent/entrypoint.d.windows/ @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent/entrypoint.ps1 @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent/windows/ @DataDog/container-integrations @DataDog/windows-agent -/Dockerfiles/agent-ot @DataDog/opentelemetry +/Dockerfiles/agent-ot @DataDog/opentelemetry @DataDog/opentelemetry-agent /Dockerfiles/agent/bouncycastle-fips @DataDog/agent-metrics /docs/ @DataDog/agent-devx-loops @@ -300,7 +300,7 @@ /comp/ndmtmp @DataDog/ndm-core /comp/netflow @DataDog/ndm-integrations /comp/networkpath @DataDog/Networks @DataDog/network-device-monitoring -/comp/otelcol @DataDog/opentelemetry +/comp/otelcol @DataDog/opentelemetry @DataDog/opentelemetry-agent /comp/process @DataDog/container-intake /comp/remote-config @DataDog/remote-config /comp/snmptraps @DataDog/ndm-core @@ -372,11 +372,11 @@ /pkg/version/ @DataDog/agent-runtimes /pkg/obfuscate/ @DataDog/agent-apm /pkg/trace/ @DataDog/agent-apm -/pkg/trace/api/otlp*.go @DataDog/opentelemetry -/pkg/trace/traceutil/otel*.go @DataDog/opentelemetry -/pkg/trace/stats/ @DataDog/agent-apm @DataDog/opentelemetry +/pkg/trace/api/otlp*.go @DataDog/opentelemetry @DataDog/opentelemetry-agent +/pkg/trace/traceutil/otel*.go @DataDog/opentelemetry @DataDog/opentelemetry-agent +/pkg/trace/stats/ @DataDog/agent-apm @DataDog/opentelemetry @DataDog/opentelemetry-agent /pkg/trace/telemetry/ @DataDog/apm-trace-storage -/pkg/trace/transform/ @DataDog/opentelemetry +/pkg/trace/transform/ @DataDog/opentelemetry 
@DataDog/opentelemetry-agent /comp/core/autodiscovery/listeners/ @DataDog/container-platform /comp/core/autodiscovery/listeners/cloudfoundry*.go @DataDog/agent-integrations /comp/core/autodiscovery/listeners/snmp*.go @DataDog/ndm-core @@ -594,7 +594,7 @@ /tasks/winbuildscripts/ @DataDog/windows-agent /tasks/winbuild.py @DataDog/windows-agent /tasks/windows_resources.py @DataDog/windows-agent -/tasks/collector.py @DataDog/opentelemetry +/tasks/collector.py @DataDog/opentelemetry @DataDog/opentelemetry-agent /tasks/components.py @DataDog/agent-runtimes /tasks/components_templates @DataDog/agent-runtimes /tasks/libs/ciproviders/ @DataDog/agent-devx-infra @@ -609,7 +609,7 @@ /test/benchmarks/ @DataDog/agent-metrics /test/benchmarks/kubernetes_state/ @DataDog/container-integrations /test/integration/ @DataDog/container-integrations -/test/integration/docker/otel_agent_build_tests.py @DataDog/opentelemetry +/test/integration/docker/otel_agent_build_tests.py @DataDog/opentelemetry @DataDog/opentelemetry-agent /test/integration/serverless @DataDog/serverless @Datadog/serverless-aws /test/integration/serverless_perf @DataDog/serverless @Datadog/serverless-aws /test/fakeintake/ @DataDog/agent-e2e-testing @DataDog/agent-devx-loops @@ -636,7 +636,7 @@ /test/new-e2e/tests/npm @DataDog/Networks /test/new-e2e/tests/npm/ec2_1host_wkit_test.go @DataDog/Networks @DataDog/windows-kernel-integrations /test/new-e2e/tests/orchestrator @DataDog/container-app -/test/new-e2e/tests/otel @DataDog/opentelemetry +/test/new-e2e/tests/otel @DataDog/opentelemetry @DataDog/opentelemetry-agent /test/new-e2e/tests/process @DataDog/container-intake /test/new-e2e/tests/sysprobe-functional @DataDog/windows-kernel-integrations /test/new-e2e/tests/security-agent-functional @DataDog/windows-kernel-integrations @DataDog/agent-security @@ -648,7 +648,7 @@ /test/new-e2e/tests/installer @DataDog/fleet @DataDog/windows-agent /test/new-e2e/tests/installer/script @DataDog/fleet @DataDog/data-jobs-monitoring 
/test/new-e2e/tests/gpu @Datadog/ebpf-platform -/test/otel/ @DataDog/opentelemetry +/test/otel/ @DataDog/opentelemetry @DataDog/opentelemetry-agent /test/system/ @DataDog/agent-runtimes /test/system/dogstatsd/ @DataDog/agent-metrics /test/benchmarks/apm_scripts/ @DataDog/agent-apm diff --git a/comp/README.md b/comp/README.md index 22c8aee34ab2fa..8e3b7e8fe2a060 100644 --- a/comp/README.md +++ b/comp/README.md @@ -385,7 +385,7 @@ Package npcollector used to manage network paths ## [comp/otelcol](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/otelcol) (Component Bundle) -*Datadog Team*: opentelemetry +*Datadog Team*: opentelemetry opentelemetry-agent Package otelcol contains the OTLP ingest bundle pipeline to be included into the agent components. diff --git a/comp/otelcol/bundle.go b/comp/otelcol/bundle.go index 7d8623b2f4701e..5ac73dff0c4775 100644 --- a/comp/otelcol/bundle.go +++ b/comp/otelcol/bundle.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Bundle specifies the bundle for the OTLP ingest pipeline. 
func Bundle() fxutil.BundleOptions { diff --git a/comp/otelcol/collector-contrib/def/component.go b/comp/otelcol/collector-contrib/def/component.go index e32d074fa3f8f3..f906a86e436874 100644 --- a/comp/otelcol/collector-contrib/def/component.go +++ b/comp/otelcol/collector-contrib/def/component.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/otelcol" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Component is the interface for the collector-contrib type Component interface { diff --git a/comp/otelcol/collector/def/component.go b/comp/otelcol/collector/def/component.go index bf2b4b176b8b5b..6f02df0b939282 100644 --- a/comp/otelcol/collector/def/component.go +++ b/comp/otelcol/collector/def/component.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/comp/otelcol/otlp/datatype" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Component specifies the interface implemented by the collector module. type Component interface { diff --git a/comp/otelcol/collector/fx-pipeline/fx.go b/comp/otelcol/collector/fx-pipeline/fx.go index dd66c7d0af1f4b..d744a7d1797398 100644 --- a/comp/otelcol/collector/fx-pipeline/fx.go +++ b/comp/otelcol/collector/fx-pipeline/fx.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Module for OTel Agent func Module() fxutil.Module { diff --git a/comp/otelcol/collector/fx/fx.go b/comp/otelcol/collector/fx/fx.go index 8d5dc441fa0b2f..f42588437506f4 100644 --- a/comp/otelcol/collector/fx/fx.go +++ b/comp/otelcol/collector/fx/fx.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Module for OTel Agent func Module() fxutil.Module { diff --git a/comp/otelcol/converter/def/component.go b/comp/otelcol/converter/def/component.go index e2dc8cac972d83..4837df3327a358 100644 --- 
a/comp/otelcol/converter/def/component.go +++ b/comp/otelcol/converter/def/component.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/confmap" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Component implements the confmap.Converter interface. type Component interface { diff --git a/comp/otelcol/ddflareextension/def/component.go b/comp/otelcol/ddflareextension/def/component.go index e49d6bf0392d03..f7bfaab5f9be72 100644 --- a/comp/otelcol/ddflareextension/def/component.go +++ b/comp/otelcol/ddflareextension/def/component.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/extension" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Component specifies the interface implemented by the extension module. type Component interface { diff --git a/comp/otelcol/logsagentpipeline/component.go b/comp/otelcol/logsagentpipeline/component.go index b6cdd56a6d2007..659fb8764ec1f3 100644 --- a/comp/otelcol/logsagentpipeline/component.go +++ b/comp/otelcol/logsagentpipeline/component.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/pipeline" ) -// team: opentelemetry +// team: opentelemetry opentelemetry-agent // Component is the component type. 
type Component interface { From 9cf6387ef05f69dd092833ce1c5672a8bab806b1 Mon Sep 17 00:00:00 2001 From: Vickenty Fesunov Date: Fri, 31 Jan 2025 16:17:39 +0100 Subject: [PATCH 96/97] AMLII-2248 Fix unpinned pointer panic (#33323) --- pkg/collector/python/test_check.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/collector/python/test_check.go b/pkg/collector/python/test_check.go index 5d633008eb5f96..881fdf40bc7c7a 100644 --- a/pkg/collector/python/test_check.go +++ b/pkg/collector/python/test_check.go @@ -12,6 +12,7 @@ import ( "runtime" "testing" "time" + "unsafe" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -212,7 +213,13 @@ func testRunCheck(t *testing.T) { C.reset_check_mock() C.run_check_return = C.CString("") - warn := []*C.char{C.CString("warn1"), C.CString("warn2"), nil} + + type warnTy *[3]*C.char + var warn warnTy + warn = warnTy(C.malloc(C.size_t(unsafe.Sizeof(*warn)))) + warn[0] = C.CString("warn1") + warn[1] = C.CString("warn2") + warn[2] = nil C.get_checks_warnings_return = &warn[0] err = check.runCheck(false) From a7e58c617398e40e4d9f730f855b5bda963f3d42 Mon Sep 17 00:00:00 2001 From: Dinesh Gurumurthy Date: Fri, 31 Jan 2025 11:57:46 -0500 Subject: [PATCH 97/97] Make infra attributes compatible with OCB (#33082) Co-authored-by: GustavoCaso --- comp/core/tagger/def/go.mod | 14 +- comp/core/tagger/fx-remote/go.mod | 88 +++-- comp/core/tagger/impl-remote/go.mod | 129 ++++--- comp/core/tagger/subscriber/go.mod | 11 +- comp/otelcol/collector-contrib/impl/go.mod | 2 +- comp/otelcol/collector/impl/collector.go | 7 +- .../ddflareextension/impl/configstore_test.go | 5 +- comp/otelcol/ddflareextension/impl/go.mod | 94 +++-- comp/otelcol/ddflareextension/impl/go.sum | 90 +++-- comp/otelcol/otlp/collector.go | 8 +- .../infraattributesprocessor/common.go | 12 +- .../infraattributesprocessor/config_test.go | 3 +- .../infraattributesprocessor/factory.go | 96 ++++- 
.../infraattributesprocessor/factory_test.go | 16 +- .../processor/infraattributesprocessor/go.mod | 159 +++++++- .../processor/infraattributesprocessor/go.sum | 362 +++++++++++++++++- .../helperclients_test.go | 12 - .../infraattributesprocessor/logs.go | 6 +- .../infraattributesprocessor/logs_test.go | 6 +- .../infraattributesprocessor/metrics.go | 7 +- .../infraattributesprocessor/metrics_test.go | 9 +- .../telemetry_noop.go | 17 + .../telemetry_not_serverless.go | 17 + .../infraattributesprocessor/traces.go | 6 +- .../infraattributesprocessor/traces_test.go | 6 +- go.mod | 16 +- test/otel/testdata/builder-config.yaml | 2 + 27 files changed, 901 insertions(+), 299 deletions(-) create mode 100644 comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_noop.go create mode 100644 comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_not_serverless.go diff --git a/comp/core/tagger/def/go.mod b/comp/core/tagger/def/go.mod index 60a2126ec2cf4e..f6ed901eb55d73 100644 --- a/comp/core/tagger/def/go.mod +++ b/comp/core/tagger/def/go.mod @@ -2,16 +2,10 @@ module github.com/DataDog/datadog-agent/comp/core/tagger/def go 1.22.0 -replace ( - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry - github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths - github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option/ -) - require ( github.com/DataDog/datadog-agent/comp/core/config v0.59.0 github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.7 - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 github.com/DataDog/datadog-agent/pkg/tagger/types v0.59.0 github.com/DataDog/datadog-agent/pkg/tagset v0.59.0 @@ -159,3 +153,9 @@ replace 
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/uti replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure replace github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version + +replace github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry + +replace github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths + +replace github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option diff --git a/comp/core/tagger/fx-remote/go.mod b/comp/core/tagger/fx-remote/go.mod index 90f8ce679f531b..a12994a4b43b91 100644 --- a/comp/core/tagger/fx-remote/go.mod +++ b/comp/core/tagger/fx-remote/go.mod @@ -4,68 +4,56 @@ go 1.22.0 toolchain go1.23.3 -replace ( - github.com/DataDog/datadog-agent/comp/core/tagger/def => ../def/ - github.com/DataDog/datadog-agent/comp/core/tagger/generic_store => ../generic_store/ - github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote => ../impl-remote/ - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry/ - github.com/DataDog/datadog-agent/pkg/api => ../../../../pkg/api/ - github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils/ - github.com/DataDog/datadog-agent/pkg/util/cache => ../../../../pkg/util/cache/ - github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil/ - github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option/ -) - require ( - github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.60.1 + github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote v0.0.0-20250129172314-517df3f51a84 + 
github.com/DataDog/datadog-agent/pkg/util/fxutil v0.61.0 go.uber.org/fx v1.23.0 ) require ( - github.com/DataDog/datadog-agent/comp/api/api/def v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/config v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.60.0 // indirect + github.com/DataDog/datadog-agent/comp/api/api/def v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/log/def v0.64.0-devel // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-20250129172314-517df3f51a84 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.7 // indirect - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/types v0.60.0 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/telemetry v0.60.1 // indirect - github.com/DataDog/datadog-agent/comp/def v0.60.1 // indirect + github.com/DataDog/datadog-agent/comp/core/telemetry v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/def v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/api v0.61.0 // indirect - 
github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/mock v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.64.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.64.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/utils v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/utils v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/proto v0.60.0 // indirect github.com/DataDog/datadog-agent/pkg/tagger/types v0.60.0 // indirect github.com/DataDog/datadog-agent/pkg/tagset v0.60.0 // indirect github.com/DataDog/datadog-agent/pkg/util/cache v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/grpc v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.60.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/option v0.64.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.61.0 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -206,3 +194,21 @@ replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/uti replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure replace github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version + +replace github.com/DataDog/datadog-agent/comp/core/tagger/def => ../def + +replace github.com/DataDog/datadog-agent/comp/core/tagger/generic_store => ../generic_store + +replace github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote => ../impl-remote + +replace 
github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry + +replace github.com/DataDog/datadog-agent/pkg/api => ../../../../pkg/api + +replace github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils + +replace github.com/DataDog/datadog-agent/pkg/util/cache => ../../../../pkg/util/cache + +replace github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil + +replace github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option diff --git a/comp/core/tagger/impl-remote/go.mod b/comp/core/tagger/impl-remote/go.mod index 817308f220706c..5ec2d7914892fd 100644 --- a/comp/core/tagger/impl-remote/go.mod +++ b/comp/core/tagger/impl-remote/go.mod @@ -4,53 +4,26 @@ go 1.22.0 toolchain go1.23.3 -replace ( - github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def/ - github.com/DataDog/datadog-agent/comp/core/config => ../../config/ - github.com/DataDog/datadog-agent/comp/core/log/def => ../../log/def/ - github.com/DataDog/datadog-agent/comp/core/log/mock => ../../log/mock/ - github.com/DataDog/datadog-agent/comp/core/tagger/def => ../def/ - github.com/DataDog/datadog-agent/comp/core/tagger/generic_store => ../generic_store/ - github.com/DataDog/datadog-agent/comp/core/tagger/subscriber => ../subscriber/ - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry/ - github.com/DataDog/datadog-agent/comp/core/tagger/types => ../types/ - github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../utils/ - github.com/DataDog/datadog-agent/comp/core/telemetry => ../../telemetry/ - github.com/DataDog/datadog-agent/comp/def => ../../../def/ - github.com/DataDog/datadog-agent/pkg/api => ../../../../pkg/api/ - github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock/ - github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils/ - github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto/ - 
github.com/DataDog/datadog-agent/pkg/tagger/types => ../../../../pkg/tagger/types/ - github.com/DataDog/datadog-agent/pkg/tagset => ../../../../pkg/tagset/ - github.com/DataDog/datadog-agent/pkg/util/common => ../../../../pkg/util/common/ - github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths/ - github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil/ - github.com/DataDog/datadog-agent/pkg/util/grpc => ../../../../pkg/util/grpc/ - github.com/DataDog/datadog-agent/pkg/util/http => ../../../../pkg/util/http/ - github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option/ -) - require ( - github.com/DataDog/datadog-agent/comp/api/api/def v0.60.0 - github.com/DataDog/datadog-agent/comp/core/config v0.60.0 + github.com/DataDog/datadog-agent/comp/api/api/def v0.61.0 + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 github.com/DataDog/datadog-agent/comp/core/log/def v0.64.0-devel github.com/DataDog/datadog-agent/comp/core/log/mock v0.60.0 - github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-20250129172314-517df3f51a84 github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.7 - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 github.com/DataDog/datadog-agent/comp/core/tagger/types v0.60.0 github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.60.0 - github.com/DataDog/datadog-agent/comp/core/telemetry v0.60.1 - github.com/DataDog/datadog-agent/comp/def v0.60.1 - github.com/DataDog/datadog-agent/pkg/config/mock v0.60.0 + 
github.com/DataDog/datadog-agent/comp/core/telemetry v0.61.0 + github.com/DataDog/datadog-agent/comp/def v0.61.0 + github.com/DataDog/datadog-agent/pkg/config/mock v0.61.0 github.com/DataDog/datadog-agent/pkg/proto v0.60.0 github.com/DataDog/datadog-agent/pkg/tagger/types v0.60.0 github.com/DataDog/datadog-agent/pkg/tagset v0.60.0 github.com/DataDog/datadog-agent/pkg/util/cache v0.61.0 github.com/DataDog/datadog-agent/pkg/util/common v0.60.0 - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.61.0 github.com/DataDog/datadog-agent/pkg/util/grpc v0.60.0 github.com/DataDog/datadog-agent/pkg/util/http v0.60.0 github.com/cenkalti/backoff v2.2.1+incompatible @@ -66,31 +39,31 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.60.0 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.60.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/api v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.64.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.64.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0 // indirect - 
github.com/DataDog/datadog-agent/pkg/config/utils v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/utils v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/log/setup v0.64.0-devel // indirect github.com/DataDog/datadog-agent/pkg/util/option v0.64.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.60.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect - github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.61.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/version v0.61.0 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -197,3 +170,49 @@ replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/ut replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil replace github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version + +replace github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def + +replace github.com/DataDog/datadog-agent/comp/core/config => ../../config + +replace github.com/DataDog/datadog-agent/comp/core/log/def => ../../log/def + +replace github.com/DataDog/datadog-agent/comp/core/log/mock => ../../log/mock + +replace github.com/DataDog/datadog-agent/comp/core/tagger/def => ../def + +replace github.com/DataDog/datadog-agent/comp/core/tagger/generic_store => ../generic_store + +replace github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry + +replace github.com/DataDog/datadog-agent/comp/core/tagger/types => ../types + +replace github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../utils + +replace github.com/DataDog/datadog-agent/comp/core/telemetry => ../../telemetry + +replace github.com/DataDog/datadog-agent/comp/def => ../../../def + +replace github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock + +replace github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto + +replace github.com/DataDog/datadog-agent/pkg/tagger/types => ../../../../pkg/tagger/types + +replace github.com/DataDog/datadog-agent/pkg/tagset => ../../../../pkg/tagset + +replace github.com/DataDog/datadog-agent/pkg/util/common => ../../../../pkg/util/common + +replace github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths + +replace github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil + 
+replace github.com/DataDog/datadog-agent/pkg/util/grpc => ../../../../pkg/util/grpc + +replace github.com/DataDog/datadog-agent/pkg/util/http => ../../../../pkg/util/http + +replace github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option + +replace github.com/DataDog/datadog-agent/pkg/api => ../../../../pkg/api + +replace github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils diff --git a/comp/core/tagger/subscriber/go.mod b/comp/core/tagger/subscriber/go.mod index 3a12b65077322d..a138fc3b33a7bb 100644 --- a/comp/core/tagger/subscriber/go.mod +++ b/comp/core/tagger/subscriber/go.mod @@ -2,13 +2,8 @@ module github.com/DataDog/datadog-agent/comp/core/tagger/subscriber go 1.22.0 -replace ( - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry - github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option/ -) - require ( - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 github.com/DataDog/datadog-agent/comp/core/telemetry v0.60.1 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.60.1 @@ -97,3 +92,7 @@ replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/ut replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil replace github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version + +replace github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../telemetry + +replace github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option diff --git a/comp/otelcol/collector-contrib/impl/go.mod b/comp/otelcol/collector-contrib/impl/go.mod index 4973526f6198c0..1b5fef691d80fd 100644 --- a/comp/otelcol/collector-contrib/impl/go.mod +++ b/comp/otelcol/collector-contrib/impl/go.mod @@ -7,7 
+7,7 @@ go 1.23.0 toolchain go1.23.5 require ( - github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.0.0-20250129172314-517df3f51a84 github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 diff --git a/comp/otelcol/collector/impl/collector.go b/comp/otelcol/collector/impl/collector.go index 5590fb179815cf..9e683df396b51b 100644 --- a/comp/otelcol/collector/impl/collector.go +++ b/comp/otelcol/collector/impl/collector.go @@ -30,7 +30,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" corelog "github.com/DataDog/datadog-agent/comp/core/log/def" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" compdef "github.com/DataDog/datadog-agent/comp/def" collectorcontrib "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def" collector "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" @@ -124,17 +123,13 @@ func newConfigProviderSettings(uris []string, converter confmap.Converter, enhan } } -func generateID(group, resource, namespace, name string) string { - return string(util.GenerateKubeMetadataEntityID(group, resource, namespace, name)) -} - func addFactories(reqs Requires, factories otelcol.Factories) { if v, ok := reqs.LogsAgent.Get(); ok { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(reqs.TraceAgent, reqs.Serializer, v, reqs.SourceProvider, reqs.StatsdClientWrapper) } else { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(reqs.TraceAgent, reqs.Serializer, nil, reqs.SourceProvider, reqs.StatsdClientWrapper) } - 
factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(reqs.Tagger, generateID) + factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactoryForAgent(reqs.Tagger) factories.Connectors[component.MustNewType("datadog")] = datadogconnector.NewFactory() factories.Extensions[ddextension.Type] = ddextension.NewFactoryForAgent(&factories, newConfigProviderSettings(reqs.URIs, reqs.Converter, false)) } diff --git a/comp/otelcol/ddflareextension/impl/configstore_test.go b/comp/otelcol/ddflareextension/impl/configstore_test.go index 21b47ce36d1339..3df083334db13f 100644 --- a/comp/otelcol/ddflareextension/impl/configstore_test.go +++ b/comp/otelcol/ddflareextension/impl/configstore_test.go @@ -38,7 +38,7 @@ import ( // this is only used for config unmarshalling. func addFactories(factories otelcol.Factories) { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(nil, nil, nil, nil, nil) - factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(nil, nil) + factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactoryForAgent(nil) factories.Connectors[component.MustNewType("datadog")] = datadogconnector.NewFactory() factories.Extensions[Type] = NewFactoryForAgent(nil, otelcol.ConfigProviderSettings{}) } @@ -149,7 +149,6 @@ func TestGetConfDump(t *testing.T) { assertEqual(t, expectedStringMap, actualStringMap) }) - } func confmapFromResolverSettings(t *testing.T, resolverSettings confmap.ResolverSettings) *confmap.Conf { @@ -165,7 +164,7 @@ func uriFromFile(filename string) []string { } func yamlBytesToMap(bytesConfig []byte) (map[string]any, error) { - var configMap = map[string]interface{}{} + configMap := map[string]interface{}{} err := yaml.Unmarshal(bytesConfig, configMap) if err != nil { return nil, err diff --git a/comp/otelcol/ddflareextension/impl/go.mod b/comp/otelcol/ddflareextension/impl/go.mod index 
8a5ada60b18862..bcf03cf3a5725c 100644 --- a/comp/otelcol/ddflareextension/impl/go.mod +++ b/comp/otelcol/ddflareextension/impl/go.mod @@ -10,11 +10,18 @@ replace ( github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../core/flare/types github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface => ../../../core/hostname/hostnameinterface github.com/DataDog/datadog-agent/comp/core/log/def => ../../../core/log/def + github.com/DataDog/datadog-agent/comp/core/log/fx => ../../../core/log/fx github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../core/log/mock github.com/DataDog/datadog-agent/comp/core/secrets => ../../../core/secrets github.com/DataDog/datadog-agent/comp/core/status => ../../../core/status + github.com/DataDog/datadog-agent/comp/core/tagger/def => ../../../core/tagger/def/ + github.com/DataDog/datadog-agent/comp/core/tagger/fx => ../../../core/tagger/fx/ + github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote => ../../../core/tagger/fx-remote/ + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store => ../../../core/tagger/generic_store/ + github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote => ../../../core/tagger/impl-remote/ github.com/DataDog/datadog-agent/comp/core/tagger/origindetection => ../../../core/tagger/origindetection github.com/DataDog/datadog-agent/comp/core/tagger/tags => ../../../core/tagger/tags + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../../../core/tagger/telemetry/ github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../core/tagger/types github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../core/tagger/utils github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../core/telemetry @@ -107,7 +114,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.58.0 - github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.58.0 + 
github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.59.0-rc.6 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.59.0 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.59.0 github.com/DataDog/datadog-agent/pkg/api v0.61.0 @@ -165,9 +172,22 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 // indirect - github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/api/api/def v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/log/fx v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/log/impl v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.7 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.59.0-rc.6 // indirect + github.com/DataDog/datadog-agent/pkg/util/cache v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/grpc v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v1.0.0 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/knadh/koanf/maps v0.1.1 // 
indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect @@ -187,7 +207,7 @@ require ( cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect @@ -202,10 +222,10 @@ require ( github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/log/def v0.64.0-devel // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.61.0 // indirect - github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/status v0.59.0-rc.6 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.64.0-devel // indirect - github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.60.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.60.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/def v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect @@ -217,7 +237,6 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.59.0-rc.6 // indirect github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.61.0 // indirect 
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.61.0 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.57.0-devel.0.20240718200853-81bf3b2e412d // indirect github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.64.0-devel // indirect github.com/DataDog/datadog-agent/comp/trace/agent/def v0.59.0-rc.6 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/def v0.61.0 // indirect @@ -243,33 +262,33 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sources v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/metrics v0.59.0-rc.6 // indirect github.com/DataDog/datadog-agent/pkg/obfuscate v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/process/util/api v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/proto v0.63.0-devel // indirect github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/serializer v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/status/health v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/tagger/types v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/tagset v0.60.0 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.61.0 // indirect 
github.com/DataDog/datadog-agent/pkg/trace v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/common v0.60.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/json v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/option v0.64.0-devel // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/sort v0.60.0 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.61.0 // indirect @@ -309,7 +328,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.118.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v27.3.1+incompatible // indirect + github.com/docker/docker v27.4.1+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 
// indirect @@ -328,9 +347,9 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-resty/resty/v2 v2.13.1 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect @@ -344,7 +363,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.8 // indirect @@ -355,7 +374,7 @@ require ( github.com/gorilla/websocket v1.5.1 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect - github.com/hashicorp/consul/api v1.30.0 // indirect + github.com/hashicorp/consul/api v1.31.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -364,7 +383,6 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.6 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect @@ -450,8 +468,6 @@ require ( 
github.com/spf13/pflag v1.0.5 // indirect github.com/stormcat24/protodep v0.1.8 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/tidwall/gjson v1.18.0 // indirect - github.com/tidwall/pretty v1.2.1 // indirect github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect @@ -482,28 +498,28 @@ require ( go.opentelemetry.io/collector/semconv v0.118.0 // indirect go.opentelemetry.io/collector/service v0.118.0 // indirect go.opentelemetry.io/contrib/config v0.10.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect go.opentelemetry.io/contrib/zpages v0.56.0 // indirect go.opentelemetry.io/otel v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect 
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect go.opentelemetry.io/otel/metric v1.33.0 // indirect go.opentelemetry.io/otel/sdk v1.33.0 // indirect go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect go.opentelemetry.io/otel/trace v1.33.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect @@ -517,7 +533,7 @@ require ( golang.org/x/sys v0.29.0 // indirect golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.29.0 // indirect gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/api v0.199.0 // indirect @@ -529,8 +545,8 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect - k8s.io/api v0.31.3 // indirect - k8s.io/apimachinery v0.31.3 // indirect + k8s.io/api v0.31.4 // indirect + k8s.io/apimachinery v0.31.4 // indirect k8s.io/client-go v0.31.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f // indirect @@ -542,3 +558,9 @@ require ( // github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version replace github.com/golang/mock => github.com/golang/mock v1.6.0 + +replace github.com/DataDog/datadog-agent/comp/core/log/impl => ../../../core/log/impl + +replace github.com/DataDog/datadog-agent/pkg/util/cache => ../../../../pkg/util/cache + +replace github.com/DataDog/datadog-agent/pkg/util/grpc => ../../../../pkg/util/grpc diff --git 
a/comp/otelcol/ddflareextension/impl/go.sum b/comp/otelcol/ddflareextension/impl/go.sum index 6e70ebd8ce8e46..07110173322097 100644 --- a/comp/otelcol/ddflareextension/impl/go.sum +++ b/comp/otelcol/ddflareextension/impl/go.sum @@ -37,8 +37,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= @@ -160,6 +160,7 @@ github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock 
v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -221,8 +222,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= +github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -282,12 +283,12 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/swag v0.22.9 
h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -343,8 +344,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -400,13 +401,15 @@ github.com/grafana/regexp 
v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= -github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= -github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= +github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ= +github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= @@ -435,8 +438,8 @@ github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFO github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod 
h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= -github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -814,13 +817,12 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= -github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= -github.com/tidwall/pretty v1.2.1/go.mod 
h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.8 h1:2qDSGdAdjaY3PEvHRva+9UFqgk+ef7cOiW1Qn5JH1y0= @@ -999,10 +1001,10 @@ go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsb go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 h1:qtFISDHKolvIxzSs0gIaiPUPR0Cucb0F2coHC7ZLdps= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo= go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ= go.opentelemetry.io/contrib/zpages v0.56.0 
h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko= @@ -1015,20 +1017,20 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= 
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= @@ -1041,10 +1043,11 @@ go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCt go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 
h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -1052,15 +1055,18 @@ go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1245,6 +1251,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1290,8 +1297,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1312,6 +1319,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1400,6 +1408,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= @@ -1469,6 +1478,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 h1:umQdVO0Ytx+kYadhuJNjFtDgIsIEBnKrOTvNuu8ClKI= @@ -1483,10 +1493,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= -k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= -k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= -k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/api v0.31.4 h1:I2QNzitPVsPeLQvexMEsj945QumYraqv9m74isPDKhM= +k8s.io/api v0.31.4/go.mod h1:d+7vgXLvmcdT1BCo79VEgJxHHryww3V5np2OYTr6jdw= +k8s.io/apimachinery v0.31.4 h1:8xjE2C4CzhYVm9DGf60yohpNUh5AEBnPxCryPBECmlM= +k8s.io/apimachinery v0.31.4/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= diff --git a/comp/otelcol/otlp/collector.go b/comp/otelcol/otlp/collector.go index 099dce19ac85c6..7d0e55f7c586e4 100644 --- 
a/comp/otelcol/otlp/collector.go +++ b/comp/otelcol/otlp/collector.go @@ -33,7 +33,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor" @@ -93,11 +92,6 @@ func (t *tagEnricher) Enrich(_ context.Context, extraTags []string, dimensions * return enrichedTags } -func generateID(group, resource, namespace, name string) string { - - return string(util.GenerateKubeMetadataEntityID(group, resource, namespace, name)) -} - func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message.Message, tagger tagger.Component) ( otelcol.Factories, error, @@ -133,7 +127,7 @@ func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message processorFactories := []processor.Factory{batchprocessor.NewFactory()} if tagger != nil { - processorFactories = append(processorFactories, infraattributesprocessor.NewFactory(tagger, generateID)) + processorFactories = append(processorFactories, infraattributesprocessor.NewFactoryForAgent(tagger)) } processors, err := processor.MakeFactoryMap(processorFactories...) 
if err != nil { diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go index 3ea280b2b2c561..e3439fcbe26c50 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go @@ -25,18 +25,14 @@ var unifiedServiceTagMap = map[string][]string{ tags.Version: {conventions.AttributeServiceVersion}, } -// GenerateKubeMetadataEntityID is a function that generates an entity ID for a Kubernetes resource. -type GenerateKubeMetadataEntityID func(group, resource, namespace, name string) string - // processInfraTags collects entities/tags from resourceAttributes and adds infra tags to resourceAttributes func processInfraTags( logger *zap.Logger, tagger taggerClient, cardinality types.TagCardinality, - generateID GenerateKubeMetadataEntityID, resourceAttributes pcommon.Map, ) { - entityIDs := entityIDsFromAttributes(resourceAttributes, generateID) + entityIDs := entityIDsFromAttributes(resourceAttributes) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags @@ -91,7 +87,7 @@ func processInfraTags( // TODO: Replace OriginIDFromAttributes in opentelemetry-mapping-go with this method // entityIDsFromAttributes gets the entity IDs from resource attributes. // If not found, an empty string slice is returned. -func entityIDsFromAttributes(attrs pcommon.Map, generateID GenerateKubeMetadataEntityID) []types.EntityID { +func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { entityIDs := make([]types.EntityID, 0, 8) // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers. 
if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok { @@ -113,11 +109,11 @@ func entityIDsFromAttributes(attrs pcommon.Map, generateID GenerateKubeMetadataE } } if namespace, ok := attrs.Get(conventions.AttributeK8SNamespaceName); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "namespaces", "", namespace.AsString()))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, fmt.Sprintf("/namespaces//%s", namespace.AsString()))) } if nodeName, ok := attrs.Get(conventions.AttributeK8SNodeName); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "nodes", "", nodeName.AsString()))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, fmt.Sprintf("/nodes//%s", nodeName.AsString()))) } if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok { entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString())) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go index 5cf3de71176518..2dac2b77260923 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go @@ -35,8 +35,7 @@ func TestLoadingConfigStrictLogs(t *testing.T) { for _, tt := range tests { t.Run(tt.id.String(), func(t *testing.T) { tc := newTestTaggerClient() - gc := newTestGenerateIDClient().generateID - f := NewFactory(tc, gc) + f := NewFactoryForAgent(tc) cfg := f.CreateDefaultConfig() sub, err := cm.Sub(tt.id.String()) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go index f513b8a59ee7c6..a0ac7bcc3c6b6a 100644 --- 
a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go @@ -7,9 +7,20 @@ package infraattributesprocessor import ( "context" + "fmt" + "sync" - "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/comp/core/config" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + "go.uber.org/fx" + logfx "github.com/DataDog/datadog-agent/comp/core/log/fx" + tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" + remoteTaggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote" + taggerTypes "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/pkg/api/security" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/processor" @@ -18,18 +29,61 @@ import ( var processorCapabilities = consumer.Capabilities{MutatesData: true} -// TODO: Remove tagger and generateID as depenendencies to enable future import of -// infraattributesprocessor by external go packages like ocb type factory struct { - tagger taggerClient - generateID GenerateKubeMetadataEntityID + tagger taggerClient + mu sync.Mutex +} + +func (f *factory) initializeTaggerClient() error { + // Ensure that the tagger is initialized only once. 
+ f.mu.Lock() + defer f.mu.Unlock() + if f.tagger != nil { + return nil + } + var client taggerClient + app := fx.New( + fx.Provide(func() config.Component { + return pkgconfigsetup.Datadog() + }), + fx.Provide(func(_ config.Component) log.Params { + return log.ForDaemon("otelcol", "log_file", pkgconfigsetup.DefaultOTelAgentLogFile) + }), + logfx.Module(), + telemetryModule(), + fxutil.FxAgentBase(), + remoteTaggerfx.Module(tagger.RemoteParams{ + RemoteTarget: func(c config.Component) (string, error) { + return fmt.Sprintf(":%v", c.GetInt("cmd_port")), nil + }, + RemoteTokenFetcher: func(c config.Component) func() (string, error) { + return func() (string, error) { + return security.FetchAuthToken(c) + } + }, + RemoteFilter: taggerTypes.NewMatchAllFilter(), + }), + fx.Provide(func(t tagger.Component) taggerClient { + return t + }), + fx.Populate(&client), + ) + if err := app.Err(); err != nil { + return err + } + f.tagger = client + return nil } // NewFactory returns a new factory for the InfraAttributes processor. -func NewFactory(tagger taggerClient, generateID GenerateKubeMetadataEntityID) processor.Factory { +func NewFactory() processor.Factory { + return NewFactoryForAgent(nil) +} + +// NewFactoryForAgent returns a new factory for the InfraAttributes processor. 
+func NewFactoryForAgent(tagger taggerClient) processor.Factory { f := &factory{ - tagger: tagger, - generateID: generateID, + tagger: tagger, } return processor.NewFactory( @@ -43,7 +97,7 @@ func NewFactory(tagger taggerClient, generateID GenerateKubeMetadataEntityID) pr func (f *factory) createDefaultConfig() component.Config { return &Config{ - Cardinality: types.LowCardinality, + Cardinality: taggerTypes.LowCardinality, } } @@ -53,7 +107,13 @@ func (f *factory) createMetricsProcessor( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - iap, err := newInfraAttributesMetricProcessor(set, cfg.(*Config), f.tagger, f.generateID) + if f.tagger == nil { + err := f.initializeTaggerClient() + if err != nil { + return nil, err + } + } + iap, err := newInfraAttributesMetricProcessor(set, cfg.(*Config), f.tagger) if err != nil { return nil, err } @@ -72,7 +132,13 @@ func (f *factory) createLogsProcessor( cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { - iap, err := newInfraAttributesLogsProcessor(set, cfg.(*Config), f.tagger, f.generateID) + if f.tagger == nil { + err := f.initializeTaggerClient() + if err != nil { + return nil, err + } + } + iap, err := newInfraAttributesLogsProcessor(set, cfg.(*Config), f.tagger) if err != nil { return nil, err } @@ -91,7 +157,13 @@ func (f *factory) createTracesProcessor( cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { - iap, err := newInfraAttributesSpanProcessor(set, cfg.(*Config), f.tagger, f.generateID) + if f.tagger == nil { + err := f.initializeTaggerClient() + if err != nil { + return nil, err + } + } + iap, err := newInfraAttributesSpanProcessor(set, cfg.(*Config), f.tagger) if err != nil { return nil, err } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go index 48432ef0fe3852..adc7f4c5ed5db3 100644 --- 
a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go @@ -21,8 +21,7 @@ import ( func TestType(t *testing.T) { tc := newTestTaggerClient() - gc := newTestGenerateIDClient().generateID - factory := NewFactory(tc, gc) + factory := NewFactoryForAgent(tc) pType := factory.Type() assert.Equal(t, pType, Type) @@ -30,8 +29,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { tc := newTestTaggerClient() - gc := newTestGenerateIDClient().generateID - factory := NewFactory(tc, gc) + factory := NewFactoryForAgent(tc) cfg := factory.CreateDefaultConfig() assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } @@ -54,11 +52,10 @@ func TestCreateProcessors(t *testing.T) { cm, err := confmaptest.LoadConf(filepath.Join("testdata", tt.configName)) require.NoError(t, err) tc := newTestTaggerClient() - gc := newTestGenerateIDClient().generateID for k := range cm.ToStringMap() { // Check if all processor variations that are defined in test config can be actually created - factory := NewFactory(tc, gc) + factory := NewFactoryForAgent(tc) cfg := factory.CreateDefaultConfig() sub, err := cm.Sub(k) @@ -94,3 +91,10 @@ func TestCreateProcessors(t *testing.T) { }) } } + +func TestInitializeTagger(t *testing.T) { + f := &factory{} + err := f.initializeTaggerClient() + assert.NoError(t, err) + assert.NotNil(t, f.tagger) +} diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod index 662045939ab2d5..fb349e69a0fd0a 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod @@ -6,24 +6,33 @@ replace ( github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../../api/api/def github.com/DataDog/datadog-agent/comp/core/flare/builder => 
../../../../../core/flare/builder github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../../core/flare/types + github.com/DataDog/datadog-agent/comp/core/log/fx => ../../../../../core/log/fx github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../../core/secrets github.com/DataDog/datadog-agent/comp/core/tagger/common => ../../../../../core/tagger/common + github.com/DataDog/datadog-agent/comp/core/tagger/def => ../../../../../core/tagger/def + github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote => ../../../../../core/tagger/fx-remote + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store => ../../../../../core/tagger/generic_store + github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote => ../../../../../core/tagger/impl-remote github.com/DataDog/datadog-agent/comp/core/tagger/tags => ../../../../../core/tagger/tags + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry => ../../../../../core/tagger/telemetry github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../../../core/tagger/types github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../../../core/tagger/utils github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../../core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../../../def + github.com/DataDog/datadog-agent/pkg/api => ../../../../../../pkg/api github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../../../pkg/config/teeconfig + 
github.com/DataDog/datadog-agent/pkg/util/cache => ../../../../../../pkg/util/cache github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../../../pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../../../pkg/util/log/setup github.com/DataDog/datadog-agent/pkg/util/option => ../../../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber @@ -34,8 +43,17 @@ replace ( ) require ( + github.com/DataDog/datadog-agent/comp/core/config v0.64.0-devel + github.com/DataDog/datadog-agent/comp/core/log/def v0.64.0-devel + github.com/DataDog/datadog-agent/comp/core/log/fx v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote v0.0.0-20250129172314-517df3f51a84 github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.64.0-devel - github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.60.0 + github.com/DataDog/datadog-agent/comp/core/telemetry v0.61.0 + github.com/DataDog/datadog-agent/pkg/api v0.61.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.61.0 github.com/stretchr/testify v1.10.0 go.opentelemetry.io/collector/component v0.118.0 go.opentelemetry.io/collector/component/componenttest v0.118.0 @@ -46,8 +64,9 @@ require ( 
go.opentelemetry.io/collector/processor v0.118.0 go.opentelemetry.io/collector/processor/processortest v0.118.0 go.opentelemetry.io/collector/semconv v0.118.0 - go.opentelemetry.io/otel/metric v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 + go.opentelemetry.io/otel/metric v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 + go.uber.org/fx v1.23.0 go.uber.org/zap v1.27.0 ) @@ -57,36 +76,156 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/api/api/def v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/log/impl v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.7 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.60.0 // indirect + github.com/DataDog/datadog-agent/comp/def v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.64.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.64.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.61.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/config/utils v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/proto v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/tagger/types v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/tagset v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/cache v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/common v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/grpc v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/http v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.64.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.64.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/sort v0.60.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.61.0 // indirect + github.com/DataDog/viper v1.14.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/ebitengine/purego v0.8.1 // indirect + github.com/fsnotify/fsnotify 
v1.8.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.2 // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + 
github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/twmb/murmur3 v1.1.8 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect go.opentelemetry.io/collector/pipeline v0.118.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // 
indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/grpc v1.69.4 // indirect google.golang.org/protobuf v1.36.3 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace github.com/DataDog/datadog-agent/comp/core/config => ../../../../../core/config + +replace github.com/DataDog/datadog-agent/comp/core/log/def => ../../../../../core/log/def + +replace github.com/DataDog/datadog-agent/comp/core/log/impl => ../../../../../core/log/impl + +replace github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../../../core/log/mock + +replace github.com/DataDog/datadog-agent/comp/core/tagger/origindetection => ../../../../../core/tagger/origindetection + +replace github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../../../pkg/config/mock + +replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../../../pkg/config/structure + +replace github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../../../pkg/config/utils + +replace github.com/DataDog/datadog-agent/pkg/proto => ../../../../../../pkg/proto + +replace github.com/DataDog/datadog-agent/pkg/tagger/types => ../../../../../../pkg/tagger/types + +replace github.com/DataDog/datadog-agent/pkg/tagset => ../../../../../../pkg/tagset + +replace github.com/DataDog/datadog-agent/pkg/util/common => ../../../../../../pkg/util/common + +replace github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../../../pkg/util/defaultpaths + +replace github.com/DataDog/datadog-agent/pkg/util/grpc => ../../../../../../pkg/util/grpc + +replace github.com/DataDog/datadog-agent/pkg/util/http => ../../../../../../pkg/util/http + +replace github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../../../pkg/util/sort + +replace github.com/DataDog/datadog-agent/pkg/version => ../../../../../../pkg/version diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum 
b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum index bf39b9338e82fc..ea581ed1e3569f 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum @@ -1,57 +1,284 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= +github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= +github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid 
v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= +github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= 
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod 
h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod 
h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra 
v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod 
h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= +github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= @@ -84,46 +311,113 @@ go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g9 
go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 
h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod 
h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -131,14 +425,50 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod 
h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 
v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go index 3069eec0acd565..7c8485d04c8a77 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go +++ 
b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go @@ -6,8 +6,6 @@ package infraattributesprocessor import ( - "fmt" - "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -32,13 +30,3 @@ func (t *testTaggerClient) Tag(entityID types.EntityID, _ types.TagCardinality) func (t *testTaggerClient) GlobalTags(_ types.TagCardinality) ([]string, error) { return t.tagMap[types.NewEntityID("internal", "global-entity-id").String()], nil } - -type testGenerateIDClient struct{} - -func newTestGenerateIDClient() *testGenerateIDClient { - return &testGenerateIDClient{} -} - -func (t *testGenerateIDClient) generateID(group, resource, namespace, name string) string { - return fmt.Sprintf("%s/%s/%s/%s", group, resource, namespace, name) -} diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go index ba66a8d4b8118e..762786dbd3fb16 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go @@ -19,15 +19,13 @@ type infraAttributesLogProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality - generateID GenerateKubeMetadataEntityID } -func newInfraAttributesLogsProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesLogProcessor, error) { +func newInfraAttributesLogsProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesLogProcessor, error) { ialp := &infraAttributesLogProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, - generateID: generateID, } set.Logger.Info("Logs Infra Attributes Processor configured") @@ -38,7 +36,7 @@ func (ialp *infraAttributesLogProcessor) processLogs(_ context.Context, ld plog. 
rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { resourceAttributes := rls.At(i).Resource().Attributes() - processInfraTags(ialp.logger, ialp.tagger, ialp.cardinality, ialp.generateID, resourceAttributes) + processInfraTags(ialp.logger, ialp.tagger, ialp.cardinality, resourceAttributes) } return ld, nil } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go index 3fc7d9ba40c0e2..b4b62cccf3dbd4 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go @@ -82,7 +82,8 @@ var ( "k8s.namespace.name": "namespace", "k8s.deployment.name": "deployment", }, - }}), + }, + }), outResourceAttributes: []map[string]any{ { "global": "tag", @@ -127,9 +128,8 @@ func TestInfraAttributesLogProcessor(t *testing.T) { tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} - gc := newTestGenerateIDClient().generateID - factory := NewFactory(tc, gc) + factory := NewFactoryForAgent(tc) flp, err := factory.CreateLogs( context.Background(), processortest.NewNopSettings(), diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go index 481c9ad9c090bc..42b3049563aa5c 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go @@ -7,6 +7,7 @@ package infraattributesprocessor import ( "context" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" "go.opentelemetry.io/collector/pdata/pmetric" @@ -18,15 +19,13 @@ type infraAttributesMetricProcessor struct { logger 
*zap.Logger tagger taggerClient cardinality types.TagCardinality - generateID GenerateKubeMetadataEntityID } -func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesMetricProcessor, error) { +func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesMetricProcessor, error) { iamp := &infraAttributesMetricProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, - generateID: generateID, } set.Logger.Info("Metric Infra Attributes Processor configured") return iamp, nil @@ -36,7 +35,7 @@ func (iamp *infraAttributesMetricProcessor) processMetrics(_ context.Context, md rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { resourceAttributes := rms.At(i).Resource().Attributes() - processInfraTags(iamp.logger, iamp.tagger, iamp.cardinality, iamp.generateID, resourceAttributes) + processInfraTags(iamp.logger, iamp.tagger, iamp.cardinality, resourceAttributes) } return md, nil } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go index fad3b1f3b4fc6a..4bfcbf2180c003 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go @@ -85,7 +85,8 @@ var ( "k8s.namespace.name": "namespace", "k8s.deployment.name": "deployment", }, - }}), + }, + }), outResourceAttributes: []map[string]any{ { "global": "tag", @@ -131,9 +132,8 @@ func TestInfraAttributesMetricProcessor(t *testing.T) { tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} - gc := newTestGenerateIDClient().generateID - factory := 
NewFactory(tc, gc) + factory := NewFactoryForAgent(tc) fmp, err := factory.CreateMetrics( context.Background(), processortest.NewNopSettings(), @@ -264,10 +264,9 @@ func TestEntityIDsFromAttributes(t *testing.T) { entityIDs: []string{"process://process_pid_goes_here"}, }, } - gc := newTestGenerateIDClient().generateID for _, testInstance := range tests { t.Run(testInstance.name, func(t *testing.T) { - entityIDs := entityIDsFromAttributes(testInstance.attrs, gc) + entityIDs := entityIDsFromAttributes(testInstance.attrs) entityIDsAsStrings := make([]string, len(entityIDs)) for idx, entityID := range entityIDs { entityIDsAsStrings[idx] = entityID.String() diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_noop.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_noop.go new file mode 100644 index 00000000000000..49812b78550500 --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_noop.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build serverless + +package infraattributesprocessor + +import ( + "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +func telemetryModule() fxutil.Module { + return noopsimpl.Module() +} diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_not_serverless.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_not_serverless.go new file mode 100644 index 00000000000000..c3326a849d334e --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/telemetry_not_serverless.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build !serverless + +package infraattributesprocessor + +import ( + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +func telemetryModule() fxutil.Module { + return telemetryimpl.Module() +} diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go index f758e84556cb4a..e7f795ac03ba41 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go @@ -19,15 +19,13 @@ type infraAttributesSpanProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality - generateID GenerateKubeMetadataEntityID } -func newInfraAttributesSpanProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesSpanProcessor, error) { +func newInfraAttributesSpanProcessor(set processor.Settings, cfg *Config, tagger 
taggerClient) (*infraAttributesSpanProcessor, error) { iasp := &infraAttributesSpanProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, - generateID: generateID, } set.Logger.Info("Span Infra Attributes Processor configured") return iasp, nil @@ -37,7 +35,7 @@ func (iasp *infraAttributesSpanProcessor) processTraces(_ context.Context, td pt rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { resourceAttributes := rss.At(i).Resource().Attributes() - processInfraTags(iasp.logger, iasp.tagger, iasp.cardinality, iasp.generateID, resourceAttributes) + processInfraTags(iasp.logger, iasp.tagger, iasp.cardinality, resourceAttributes) } return td, nil } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go index 5508562727d8b8..db929e287425d0 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go @@ -82,7 +82,8 @@ var ( "k8s.namespace.name": "namespace", "k8s.deployment.name": "deployment", }, - }}), + }, + }), outResourceAttributes: []map[string]any{ { "global": "tag", @@ -127,8 +128,7 @@ func TestInfraAttributesTraceProcessor(t *testing.T) { tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} - gc := newTestGenerateIDClient().generateID - factory := NewFactory(tc, gc) + factory := NewFactoryForAgent(tc) fmp, err := factory.CreateTraces( context.Background(), processortest.NewNopSettings(), diff --git a/go.mod b/go.mod index 472025f39f2961..d8c5fbd91db6f3 100644 --- a/go.mod +++ b/go.mod @@ -597,13 +597,13 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/log/fx v0.0.0-00010101000000-000000000000 - 
github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/subscriber v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/fx v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/def v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/generic_store v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/subscriber v0.0.0-20250129172314-517df3f51a84 + github.com/DataDog/datadog-agent/comp/core/tagger/telemetry v0.0.0-20250129172314-517df3f51a84 github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 github.com/Masterminds/sprig/v3 v3.3.0 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e @@ -642,7 +642,7 @@ require ( require ( github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/DataDog/agent-payload/v5 v5.0.141 - github.com/DataDog/datadog-agent/comp/api/api/def v0.60.0 + github.com/DataDog/datadog-agent/comp/api/api/def v0.61.0 github.com/DataDog/datadog-agent/comp/core/config v0.64.0-devel github.com/DataDog/datadog-agent/comp/core/flare/types v0.61.0 github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.61.0 diff --git a/test/otel/testdata/builder-config.yaml b/test/otel/testdata/builder-config.yaml index 7053c026911a2a..1ddb05ddce7bb4 
100644 --- a/test/otel/testdata/builder-config.yaml +++ b/test/otel/testdata/builder-config.yaml @@ -38,6 +38,8 @@ extensions: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0 processors: +- gomod: github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.61.0 + path: ./comp/otelcol/otlp/components/processor/infraattributesprocessor - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor