diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 2c7aab7913d3..072676c38ea0 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -352,6 +352,8 @@ jobs: fi - name: "Instrumentation Test" run: ./tests/e2e-inst-test.sh + - name: "Analyze Mode Instrumentation Test" + run: ./tests/e2e-analyze-inst-test.sh - name: "Network Test" run: ./tests/e2e-net-test.sh - name: "Kernel Test" diff --git a/Makefile b/Makefile index f15939c33960..e381f44dd2a7 100644 --- a/Makefile +++ b/Makefile @@ -685,12 +685,16 @@ clean-e2e-net-signatures: # e2e instrumentation signatures E2E_INST_DIR ?= tests/e2e-inst-signatures +E2E_INST_FILES_TO_EXCLUDE ?= "" +# Loop through each filename in the environment variable and construct the exclusion part of the find command +IGNORE_FILES := $(foreach file,$(shell echo $(E2E_INST_FILES_TO_EXCLUDE)),! -name '$(file)') E2E_INST_SRC := $(shell find $(E2E_INST_DIR) \ -type f \ -name '*.go' \ ! -name '*_test.go' \ ! -path '$(E2E_INST_DIR)/scripts/*' \ ! 
-path '$(E2E_INST_DIR)/datasourcetest/*' \ + $(IGNORE_FILES) \ ) .PHONY: e2e-inst-signatures diff --git a/cmd/tracee/cmd/analyze.go b/cmd/tracee/cmd/analyze.go deleted file mode 100644 index 7d8c6c953dc6..000000000000 --- a/cmd/tracee/cmd/analyze.go +++ /dev/null @@ -1,246 +0,0 @@ -package cmd - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "os" - "os/signal" - "syscall" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - "github.com/aquasecurity/tracee/pkg/cmd/flags" - "github.com/aquasecurity/tracee/pkg/cmd/initialize" - tracee "github.com/aquasecurity/tracee/pkg/ebpf" - "github.com/aquasecurity/tracee/pkg/events" - "github.com/aquasecurity/tracee/pkg/logger" - "github.com/aquasecurity/tracee/pkg/signatures/engine" - "github.com/aquasecurity/tracee/pkg/signatures/signature" - "github.com/aquasecurity/tracee/types/detect" - "github.com/aquasecurity/tracee/types/protocol" - "github.com/aquasecurity/tracee/types/trace" -) - -func init() { - rootCmd.AddCommand(analyze) - - // flags - - // events - analyze.Flags().StringArrayP( - "events", - "e", - []string{}, - "Define which signature events to load", - ) - - // signatures-dir - analyze.Flags().StringArray( - "signatures-dir", - []string{}, - "Directory where to search for signatures in OPA (.rego) and Go plugin (.so) formats", - ) - - // rego - analyze.Flags().StringArray( - "rego", - []string{}, - "Control event rego settings", - ) - - analyze.Flags().StringArrayP( - "log", - "l", - []string{"info"}, - "Logger options [debug|info|warn...]", - ) -} - -var analyze = &cobra.Command{ - Use: "analyze input.json", - Aliases: []string{}, - Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), - Short: "Analyze past events with signature events [Experimental]", - Long: `Analyze allow you to explore signature events with past events. - -Tracee can be used to collect events and store it in a file. This file can be used as input to analyze. 
- -eg: -tracee --events ptrace --output=json:events.json -tracee analyze --events anti_debugging events.json`, - PreRun: func(cmd *cobra.Command, args []string) { - bindViperFlag(cmd, "events") - bindViperFlag(cmd, "log") - bindViperFlag(cmd, "rego") - bindViperFlag(cmd, "signatures-dir") - }, - Run: func(cmd *cobra.Command, args []string) { - logFlags := viper.GetStringSlice("log") - - logCfg, err := flags.PrepareLogger(logFlags, true) - if err != nil { - logger.Fatalw("Failed to prepare logger", "error", err) - } - logger.Init(logCfg) - - inputFile, err := os.Open(args[0]) - if err != nil { - logger.Fatalw("Failed to get signatures-dir flag", "err", err) - } - - // Rego command line flags - - rego, err := flags.PrepareRego(viper.GetStringSlice("rego")) - if err != nil { - logger.Fatalw("Failed to parse rego flags", "err", err) - } - - // Signature directory command line flags - - signatureEvents := viper.GetStringSlice("events") - // if no event was passed, load all events - if len(signatureEvents) == 0 { - signatureEvents = nil - } - - sigs, _, err := signature.Find( - rego.RuntimeTarget, - rego.PartialEval, - viper.GetStringSlice("signatures-dir"), - signatureEvents, - rego.AIO, - ) - - if err != nil { - logger.Fatalw("Failed to find signature event", "err", err) - } - - if len(sigs) == 0 { - logger.Fatalw("No signature event loaded") - } - - logger.Infow( - "Signatures loaded", - "total", len(sigs), - "signatures", getSigsNames(sigs), - ) - - _ = initialize.CreateEventsFromSignatures(events.StartSignatureID, sigs) - - engineConfig := engine.Config{ - Signatures: sigs, - SignatureBufferSize: 1000, - } - - ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer stop() - - engineOutput := make(chan *detect.Finding) - engineInput := make(chan protocol.Event) - - source := engine.EventSources{Tracee: engineInput} - sigEngine, err := engine.NewEngine(engineConfig, source, engineOutput) - if err != nil { - 
logger.Fatalw("Failed to create engine", "err", err) - } - - err = sigEngine.Init() - if err != nil { - logger.Fatalw("failed to initialize signature engine", "err", err) - } - - go sigEngine.Start(ctx) - - // producer - go produce(ctx, inputFile, engineInput) - - // consumer - for { - select { - case finding, ok := <-engineOutput: - if !ok { - return - } - process(finding) - case <-ctx.Done(): - goto drain - } - } - drain: - // drain - for { - select { - case finding, ok := <-engineOutput: - if !ok { - return - } - process(finding) - default: - return - } - } - }, - DisableFlagsInUseLine: true, -} - -func produce(ctx context.Context, inputFile *os.File, engineInput chan protocol.Event) { - // ensure the engineInput channel will be closed - defer close(engineInput) - - scanner := bufio.NewScanner(inputFile) - scanner.Split(bufio.ScanLines) - for { - select { - case <-ctx.Done(): - return - default: - if !scanner.Scan() { // if EOF or error close the done channel and return - return - } - - var e trace.Event - err := json.Unmarshal(scanner.Bytes(), &e) - if err != nil { - logger.Fatalw("Failed to unmarshal event", "err", err) - } - engineInput <- e.ToProtocol() - } - } -} - -func process(finding *detect.Finding) { - event, err := tracee.FindingToEvent(finding) - if err != nil { - logger.Fatalw("Failed to convert finding to event", "err", err) - } - - jsonEvent, err := json.Marshal(event) - if err != nil { - logger.Fatalw("Failed to json marshal event", "err", err) - } - - fmt.Println(string(jsonEvent)) -} - -func bindViperFlag(cmd *cobra.Command, flag string) { - err := viper.BindPFlag(flag, cmd.Flags().Lookup(flag)) - if err != nil { - logger.Fatalw("Error binding viper flag", "flag", flag, "error", err) - } -} - -func getSigsNames(sigs []detect.Signature) []string { - var sigsNames []string - for _, sig := range sigs { - sigMeta, err := sig.GetMetadata() - if err != nil { - logger.Warnw("Failed to get signature metadata", "err", err) - continue - } - sigsNames = 
append(sigsNames, sigMeta.Name) - } - return sigsNames -} diff --git a/cmd/tracee/cmd/root.go b/cmd/tracee/cmd/root.go index 8aeefe27df83..6419b4895c2b 100644 --- a/cmd/tracee/cmd/root.go +++ b/cmd/tracee/cmd/root.go @@ -29,6 +29,9 @@ var ( Long: `Tracee uses eBPF technology to tap into your system and give you access to hundreds of events that help you understand how your system behaves.`, DisableFlagParsing: true, // in order to have fine grained control over flags parsing + PersistentPreRun: func(cmd *cobra.Command, args []string) { + logger.Init(logger.NewDefaultLoggingConfig()) + }, PreRun: func(cmd *cobra.Command, args []string) { if len(args) > 0 { // parse all flags @@ -47,9 +50,28 @@ access to hundreds of events that help you understand how your system behaves.`, } checkConfigFlag() } + bindViperFlag(cmd, "output") + bindViperFlag(cmd, "no-containers") + bindViperFlag(cmd, "cri") + bindViperFlag(cmd, "signatures-dir") + bindViperFlag(cmd, "rego") + bindViperFlag(cmd, "perf-buffer-size") + bindViperFlag(cmd, "blob-perf-buffer-size") + bindViperFlag(cmd, "cache") + bindViperFlag(cmd, "proctree") + bindViperFlag(cmd, server.HealthzEndpointFlag) + bindViperFlag(cmd, server.PProfEndpointFlag) + bindViperFlag(cmd, server.PyroscopeAgentFlag) + bindViperFlag(cmd, server.HTTPListenEndpointFlag) + bindViperFlag(cmd, server.GRPCListenEndpointFlag) + bindViperFlag(cmd, "capabilities") + bindViperFlag(cmd, "install-path") + bindViperFlag(cmd, "log") + bindViperFlag(cmd, server.MetricsEndpointFlag) + bindViperFlag(cmd, "input") + bindViperFlag(cmd, "dnscache") }, Run: func(cmd *cobra.Command, args []string) { - logger.Init(logger.NewDefaultLoggingConfig()) initialize.SetLibbpfgoCallbacks() runner, err := cmdcobra.GetTraceeRunner(cmd, version.GetVersion()) @@ -122,10 +144,6 @@ func initCmd() error { []string{"table"}, "[json|none|webhook...]\t\tControl how and where output is printed", ) - err := viper.BindPFlag("output", rootCmd.Flags().Lookup("output")) - if err != nil 
{ - return errfmt.WrapError(err) - } // capture is not bound to viper rootCmd.Flags().StringArrayP( @@ -152,20 +170,12 @@ func initCmd() error { false, "\t\t\t\t\tDisable container info enrichment to events. Safeguard option.", ) - err = viper.BindPFlag("no-containers", rootCmd.Flags().Lookup("no-containers")) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().StringArray( "cri", []string{}, "\t\t\tDefine connected container runtimes", ) - err = viper.BindPFlag("cri", rootCmd.Flags().Lookup("cri")) - if err != nil { - return errfmt.WrapError(err) - } // Signature flags @@ -174,20 +184,12 @@ func initCmd() error { []string{}, "\t\t\t\tDirectories where to search for signatures in OPA (.rego) and Go plugin (.so) formats", ) - err = viper.BindPFlag("signatures-dir", rootCmd.Flags().Lookup("signatures-dir")) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().StringArray( "rego", []string{}, "[partial-eval|aio]\t\t\tControl event rego settings", ) - err = viper.BindPFlag("rego", rootCmd.Flags().Lookup("rego")) - if err != nil { - return errfmt.WrapError(err) - } // Buffer/Cache flags @@ -197,20 +199,12 @@ func initCmd() error { 1024, // 4 MB of contiguous pages "\t\t\t\tSize, in pages, of the internal perf ring buffer used to submit events from the kernel", ) - err = viper.BindPFlag("perf-buffer-size", rootCmd.Flags().Lookup("perf-buffer-size")) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().Int( "blob-perf-buffer-size", 1024, // 4 MB of contiguous pages "\t\t\t\tSize, in pages, of the internal perf ring buffer used to send blobs from the kernel", ) - err = viper.BindPFlag("blob-perf-buffer-size", rootCmd.Flags().Lookup("blob-perf-buffer-size")) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().StringArrayP( "cache", @@ -218,10 +212,6 @@ func initCmd() error { []string{"none"}, "[type|mem-cache-size]\t\tControl event caching queues", ) - err = viper.BindPFlag("cache", 
rootCmd.Flags().Lookup("cache")) - if err != nil { - return errfmt.WrapError(err) - } // Process Tree flags @@ -231,10 +221,6 @@ func initCmd() error { []string{"none"}, "[process|thread]\t\t\tControl process tree options", ) - err = viper.BindPFlag("proctree", rootCmd.Flags().Lookup("proctree")) - if err != nil { - return errfmt.WrapError(err) - } // DNS Cache flags @@ -243,10 +229,6 @@ func initCmd() error { []string{"none"}, "\t\t\t\t\tEnable DNS Cache", ) - err = viper.BindPFlag("dnscache", rootCmd.Flags().Lookup("dnscache")) - if err != nil { - return errfmt.WrapError(err) - } // Server flags @@ -255,60 +237,36 @@ func initCmd() error { false, "\t\t\t\t\tEnable metrics endpoint", ) - err = viper.BindPFlag(server.MetricsEndpointFlag, rootCmd.Flags().Lookup(server.MetricsEndpointFlag)) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().Bool( server.HealthzEndpointFlag, false, "\t\t\t\t\tEnable healthz endpoint", ) - err = viper.BindPFlag(server.HealthzEndpointFlag, rootCmd.Flags().Lookup(server.HealthzEndpointFlag)) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().Bool( server.PProfEndpointFlag, false, "\t\t\t\t\tEnable pprof endpoints", ) - err = viper.BindPFlag(server.PProfEndpointFlag, rootCmd.Flags().Lookup(server.PProfEndpointFlag)) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().Bool( server.PyroscopeAgentFlag, false, "\t\t\t\t\tEnable pyroscope agent", ) - err = viper.BindPFlag(server.PyroscopeAgentFlag, rootCmd.Flags().Lookup(server.PyroscopeAgentFlag)) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().String( server.HTTPListenEndpointFlag, ":3366", "\t\t\t\tListening address of the metrics endpoint server", ) - err = viper.BindPFlag(server.HTTPListenEndpointFlag, rootCmd.Flags().Lookup(server.HTTPListenEndpointFlag)) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().String( server.GRPCListenEndpointFlag, "", // disabled by default "\t\t\tListening address of 
the grpc server eg: tcp:4466, unix:/tmp/tracee.sock (default: disabled)", ) - err = viper.BindPFlag(server.GRPCListenEndpointFlag, rootCmd.Flags().Lookup(server.GRPCListenEndpointFlag)) - if err != nil { - return errfmt.WrapError(err) - } // Other flags @@ -318,20 +276,12 @@ func initCmd() error { []string{}, "[bypass|add|drop]\t\t\tDefine capabilities for tracee to run with", ) - err = viper.BindPFlag("capabilities", rootCmd.Flags().Lookup("capabilities")) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().String( "install-path", "/tmp/tracee", "\t\t\t\tPath where tracee will install or lookup it's resources", ) - err = viper.BindPFlag("install-path", rootCmd.Flags().Lookup("install-path")) - if err != nil { - return errfmt.WrapError(err) - } rootCmd.Flags().StringArrayP( "log", @@ -339,10 +289,13 @@ func initCmd() error { []string{"info"}, "[debug|info|warn...]\t\tLogger options", ) - err = viper.BindPFlag("log", rootCmd.Flags().Lookup("log")) - if err != nil { - return errfmt.WrapError(err) - } + + rootCmd.Flags().StringP( + "input", + "i", + "", + "[json|rego]\t\t\tControl how and where input events stream is received", + ) rootCmd.Flags().SortFlags = false @@ -380,3 +333,10 @@ func Execute() error { return rootCmd.Execute() } + +func bindViperFlag(cmd *cobra.Command, flag string) { + err := viper.BindPFlag(flag, cmd.Flags().Lookup(flag)) + if err != nil { + logger.Fatalw("Error binding viper flag", "flag", flag, "error", err) + } +} diff --git a/docs/docs/events/builtin/extra/init_tracee_data.md b/docs/docs/events/builtin/extra/init_tracee_data.md new file mode 100644 index 000000000000..ceb69c9b1a77 --- /dev/null +++ b/docs/docs/events/builtin/extra/init_tracee_data.md @@ -0,0 +1,26 @@ +# init_tracee_data + +## Intro + +init_tracee_data - An event that exports some relevant data of Tracee upon startup. + +## Description + +This is an event created in user-mode upon Tracee's initialization. 
Hence, it should be one of the first events to be created by Tracee. +The event is used to pass the user some internal data of Tracee that might have some significance for event analysis. +The event was also created with the Analyze mode of Tracee in mind, to pass the Analyze mode some information regarding how Tracee ran during runtime. + +## Arguments + +* `boot_time`:`u64`[U] - the boot time of the system Tracee runs in, since epoch. +* `start_time`:`u64`[U] - the time Tracee started since epoch. + +## Hooks + +## Example Use Case + +The event could be used to calculate the relative time of events since Tracee's start. + +## Related Events + +`init_namespaces` \ No newline at end of file diff --git a/docs/docs/events/custom/analyze.md b/docs/docs/events/custom/analyze.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pkg/cmd/cobra/config.go b/pkg/cmd/cobra/config.go index 9f2cc27d4b84..b408d042b2b4 100644 --- a/pkg/cmd/cobra/config.go +++ b/pkg/cmd/cobra/config.go @@ -404,6 +404,9 @@ func (c *OutputConfig) flags() []string { if c.Options.ParseArguments { flags = append(flags, "option:parse-arguments") } + if c.Options.DisableParseArguments { + flags = append(flags, "option:disable-parse-arguments") + } if c.Options.ParseArgumentsFDs { flags = append(flags, "option:parse-arguments-fds") } @@ -475,14 +478,15 @@ func (c *OutputConfig) flags() []string { } type OutputOptsConfig struct { - None bool `mapstructure:"none"` - StackAddresses bool `mapstructure:"stack-addresses"` - ExecEnv bool `mapstructure:"exec-env"` - RelativeTime bool `mapstructure:"relative-time"` - ExecHash string `mapstructure:"exec-hash"` - ParseArguments bool `mapstructure:"parse-arguments"` - ParseArgumentsFDs bool `mapstructure:"parse-arguments-fds"` - SortEvents bool `mapstructure:"sort-events"` + None bool `mapstructure:"none"` + StackAddresses bool `mapstructure:"stack-addresses"` + ExecEnv bool `mapstructure:"exec-env"` + RelativeTime bool `mapstructure:"relative-time"` + 
ExecHash string `mapstructure:"exec-hash"` + ParseArguments bool `mapstructure:"parse-arguments"` + DisableParseArguments bool `mapstructure:"disable-parse-arguments"` + ParseArgumentsFDs bool `mapstructure:"parse-arguments-fds"` + SortEvents bool `mapstructure:"sort-events"` } type OutputFormatConfig struct { diff --git a/pkg/cmd/cobra/cobra.go b/pkg/cmd/cobra/root.go similarity index 70% rename from pkg/cmd/cobra/cobra.go rename to pkg/cmd/cobra/root.go index 71299f8a2783..e3bf972e1a86 100644 --- a/pkg/cmd/cobra/cobra.go +++ b/pkg/cmd/cobra/root.go @@ -18,6 +18,7 @@ import ( "github.com/aquasecurity/tracee/pkg/k8s/apis/tracee.aquasec.com/v1beta1" "github.com/aquasecurity/tracee/pkg/logger" "github.com/aquasecurity/tracee/pkg/policy" + "github.com/aquasecurity/tracee/pkg/producer" "github.com/aquasecurity/tracee/pkg/signatures/engine" "github.com/aquasecurity/tracee/pkg/signatures/signature" "github.com/aquasecurity/tracee/pkg/utils/environment" @@ -91,78 +92,135 @@ func GetTraceeRunner(c *cobra.Command, version string) (cmd.Runner, error) { logger.Debugw("OSInfo", osInfoSlice...) 
} + traceeInstallPath := viper.GetString("install-path") + cfg.OSInfo = osInfo - // Container Runtime command line flags + // Input command line flags + input, err := flags.PrepareInput(viper.GetString("input")) - if !cfg.NoContainersEnrich { - criFlags, err := GetFlagsFromViper("cri") + if err != nil { + return runner, err + } + + var inputProducer producer.EventsProducer + if input != nil { + inputProducer, err = producer.New(input) if err != nil { return runner, err } + } else { // Init all eBPF related values + // Container Runtime command line flags + + if !cfg.NoContainersEnrich { + criFlags, err := GetFlagsFromViper("cri") + if err != nil { + return runner, err + } + + sockets, err := flags.PrepareContainers(criFlags) + if err != nil { + return runner, err + } + cfg.Sockets = sockets + } - sockets, err := flags.PrepareContainers(criFlags) + // Cache command line flags + + cacheFlags, err := GetFlagsFromViper("cache") if err != nil { return runner, err } - cfg.Sockets = sockets - } - // Cache command line flags + cache, err := flags.PrepareCache(cacheFlags) + if err != nil { + return runner, err + } + cfg.Cache = cache + if cfg.Cache != nil { + logger.Debugw("Cache", "type", cfg.Cache.String()) + } - cacheFlags, err := GetFlagsFromViper("cache") - if err != nil { - return runner, err - } + // DNS Cache command line flags - cache, err := flags.PrepareCache(cacheFlags) - if err != nil { - return runner, err - } - cfg.Cache = cache - if cfg.Cache != nil { - logger.Debugw("Cache", "type", cfg.Cache.String()) - } + dnsCacheFlags, err := GetFlagsFromViper("dnscache") + if err != nil { + return runner, err + } - // Process Tree command line flags + dnsCache, err := flags.PrepareDnsCache(dnsCacheFlags) + if err != nil { + return runner, err + } - procTreeFlags, err := GetFlagsFromViper("proctree") - if err != nil { - return runner, err - } + cfg.DNSCacheConfig = dnsCache - procTree, err := flags.PrepareProcTree(procTreeFlags) - if err != nil { - return runner, err - 
} - cfg.ProcTree = procTree + // Capture command line flags - via cobra flag + + captureFlags, err := c.Flags().GetStringArray("capture") + if err != nil { + return runner, err + } - // DNS Cache command line flags + capture, err := flags.PrepareCapture(captureFlags, true) + if err != nil { + return runner, err + } + cfg.Capture = &capture - dnsCacheFlags, err := GetFlagsFromViper("dnscache") - if err != nil { - return runner, err - } + // Check kernel lockdown - dnsCache, err := flags.PrepareDnsCache(dnsCacheFlags) - if err != nil { - return runner, err - } + lockdown, err := environment.Lockdown() + if err != nil { + logger.Debugw("OSInfo", "lockdown", err) + } + if err == nil && lockdown == environment.CONFIDENTIALITY { + return runner, errfmt.Errorf("kernel lockdown is set to 'confidentiality', can't load eBPF programs") + } - cfg.DNSCacheConfig = dnsCache + logger.Debugw("OSInfo", "security_lockdown", lockdown) - // Capture command line flags - via cobra flag + // Check if ftrace is enabled - captureFlags, err := c.Flags().GetStringArray("capture") - if err != nil { - return runner, err + enabled, err := environment.FtraceEnabled() + if err != nil { + return runner, err + } + if !enabled { + logger.Errorw("ftrace_enabled: ftrace is not enabled, kernel events won't be caught, make sure to enable it by executing echo 1 | sudo tee /proc/sys/kernel/ftrace_enabled") + } + + // Pick OS information + + kernelConfig, err := initialize.KernelConfig() + if err != nil { + return runner, err + } + + // Decide BTF & BPF files to use (based in the kconfig, release & environment info) + + err = initialize.BpfObject(&cfg, kernelConfig, osInfo, traceeInstallPath, version) + if err != nil { + return runner, errfmt.Errorf("failed preparing BPF object: %v", err) + } + } + + // Process Tree command line flags + + // TODO: support proctree in analyze mode + var procTreeFlags = []string{"none"} + if input == nil { + procTreeFlags, err = GetFlagsFromViper("proctree") + if err != nil 
{ + return runner, err + } } - capture, err := flags.PrepareCapture(captureFlags, true) + procTree, err := flags.PrepareProcTree(procTreeFlags) if err != nil { return runner, err } - cfg.Capture = &capture + cfg.ProcTree = procTree // Capabilities command line flags @@ -256,43 +314,6 @@ func GetTraceeRunner(c *cobra.Command, version string) (cmd.Runner, error) { return runner, err } - // Check kernel lockdown - - lockdown, err := environment.Lockdown() - if err != nil { - logger.Debugw("OSInfo", "lockdown", err) - } - if err == nil && lockdown == environment.CONFIDENTIALITY { - return runner, errfmt.Errorf("kernel lockdown is set to 'confidentiality', can't load eBPF programs") - } - - logger.Debugw("OSInfo", "security_lockdown", lockdown) - - // Check if ftrace is enabled - - enabled, err := environment.FtraceEnabled() - if err != nil { - return runner, err - } - if !enabled { - logger.Errorw("ftrace_enabled: ftrace is not enabled, kernel events won't be caught, make sure to enable it by executing echo 1 | sudo tee /proc/sys/kernel/ftrace_enabled") - } - - // Pick OS information - - kernelConfig, err := initialize.KernelConfig() - if err != nil { - return runner, err - } - - // Decide BTF & BPF files to use (based in the kconfig, release & environment info) - - traceeInstallPath := viper.GetString("install-path") - err = initialize.BpfObject(&cfg, kernelConfig, osInfo, traceeInstallPath, version) - if err != nil { - return runner, errfmt.Errorf("failed preparing BPF object: %v", err) - } - // Prepare the server httpServer, err := server.PrepareHTTPServer( @@ -315,11 +336,9 @@ func GetTraceeRunner(c *cobra.Command, version string) (cmd.Runner, error) { runner.GRPCServer = grpcServer runner.TraceeConfig = cfg runner.Printer = p + runner.Producer = inputProducer runner.InstallPath = traceeInstallPath - // parse arguments must be enabled if the rule engine is part of the pipeline - runner.TraceeConfig.Output.ParseArguments = true - runner.TraceeConfig.EngineConfig = 
engine.Config{ Enabled: true, SigNameToEventID: sigNameToEventId, diff --git a/pkg/cmd/flags/input.go b/pkg/cmd/flags/input.go new file mode 100644 index 000000000000..bf0d4f569523 --- /dev/null +++ b/pkg/cmd/flags/input.go @@ -0,0 +1,79 @@ +package flags + +import ( + "fmt" + "os" + "strings" + + "github.com/aquasecurity/tracee/pkg/capabilities" + "github.com/aquasecurity/tracee/pkg/config" + "github.com/aquasecurity/tracee/pkg/errfmt" + cap2 "kernel.org/pub/linux/libs/security/libcap/cap" +) + +// PrepareInput create the events producer configuration for Tracee. +// Input producer is a substitute to the eBPF code of Tracee, hence the return value will +// be nil if the eBPF code should be used. +func PrepareInput(inputOption string) (*config.ProducerConfig, error) { + if inputOption == "" { + return nil, nil + } + inputSourceOptions := &config.ProducerConfig{} + inParts := strings.SplitN(inputOption, ":", 2) + + switch inputSourceOptions.Kind = inParts[0]; inputSourceOptions.Kind { + case "json", "rego": + inputSourceOptions.Kind = inParts[0] + var fileOpt string + switch len(inParts) { + case 1: + fileOpt = "stdin" + case 2: + fileOpt = inParts[1] + default: + return nil, fmt.Errorf( + "invalid input option: %s, use '--input help' for more info", + inputOption, + ) + } + err := parseTraceeInputSource(inputSourceOptions, fileOpt) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf( + "invalid input flag: %s, use '--help' for more info", + inputSourceOptions.Kind, + ) + } + return inputSourceOptions, nil +} + +func parseTraceeInputSource(option *config.ProducerConfig, fileOpt string) error { + var f *os.File + + if fileOpt == "stdin" { + option.InputSource = os.Stdin + return nil + } + err := capabilities.GetInstance().Specific( + func() error { + _, err := os.Stat(fileOpt) + if err != nil { + return errfmt.Errorf("invalid Tracee input file: %s", fileOpt) + } + f, err = os.Open(fileOpt) + if err != nil { + return errfmt.Errorf("invalid 
file: %s", fileOpt) + } + return nil + }, + cap2.DAC_OVERRIDE, + ) + if err != nil { + return errfmt.WrapError(err) + } + option.InputSource = f + + return nil +} diff --git a/pkg/cmd/flags/output.go b/pkg/cmd/flags/output.go index 668b56cd3609..8e50e979c666 100644 --- a/pkg/cmd/flags/output.go +++ b/pkg/cmd/flags/output.go @@ -19,7 +19,9 @@ type PrepareOutputResult struct { func PrepareOutput(outputSlice []string, newBinary bool) (PrepareOutputResult, error) { outConfig := PrepareOutputResult{} - traceeConfig := &config.OutputConfig{} + traceeConfig := &config.OutputConfig{ + ParseArguments: true, + } // outpath:format printerMap := make(map[string]string) @@ -105,6 +107,8 @@ func setOption(cfg *config.OutputConfig, option string, newBinary bool) error { cfg.RelativeTime = true case "parse-arguments": cfg.ParseArguments = true + case "disable-parse-arguments": + cfg.ParseArguments = false case "parse-arguments-fds": cfg.ParseArgumentsFDs = true cfg.ParseArguments = true // no point in parsing file descriptor args only @@ -158,11 +162,6 @@ func getPrinterConfigs(printerMap map[string]string, traceeConfig *config.Output printerConfigs := make([]config.PrinterConfig, 0, len(printerMap)) for outPath, printerKind := range printerMap { - if printerKind == "table" { - if err := setOption(traceeConfig, "parse-arguments", newBinary); err != nil { - return nil, err - } - } outFile := os.Stdout var err error diff --git a/pkg/cmd/flags/tracee_ebpf_output.go b/pkg/cmd/flags/tracee_ebpf_output.go index 24f5957ff98b..6e97e02fd847 100644 --- a/pkg/cmd/flags/tracee_ebpf_output.go +++ b/pkg/cmd/flags/tracee_ebpf_output.go @@ -73,12 +73,6 @@ func TraceeEbpfPrepareOutput(outputSlice []string, newBinary bool) (PrepareOutpu printerConfigs := make([]config.PrinterConfig, 0) - if printerKind == "table" { - if err := setOption(traceeConfig, "parse-arguments", newBinary); err != nil { - return outConfig, err - } - } - if outPath == "" { stdoutConfig := config.PrinterConfig{ Kind: 
printerKind, diff --git a/pkg/cmd/tracee.go b/pkg/cmd/tracee.go index 6291b65f3435..0f072a158490 100644 --- a/pkg/cmd/tracee.go +++ b/pkg/cmd/tracee.go @@ -6,6 +6,7 @@ import ( "strconv" "syscall" + "github.com/aquasecurity/tracee/pkg/producer" "github.com/aquasecurity/tracee/pkg/cmd/printer" "github.com/aquasecurity/tracee/pkg/config" tracee "github.com/aquasecurity/tracee/pkg/ebpf" @@ -19,6 +20,7 @@ import ( type Runner struct { TraceeConfig config.Config Printer printer.EventPrinter + Producer producer.EventsProducer InstallPath string HTTPServer *http.Server GRPCServer *grpc.Server @@ -32,6 +34,8 @@ func (r Runner) Run(ctx context.Context) error { return errfmt.Errorf("error creating Tracee: %v", err) } + t.SetProducer(r.Producer) + // Readiness Callback: Tracee is ready to receive events t.AddReadyCallback( func(ctx context.Context) { diff --git a/pkg/config/config.go b/pkg/config/config.go index c60c202612db..1517394c1692 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -46,26 +46,29 @@ func (c Config) Validate() error { } // Capture - if len(c.Capture.FileWrite.PathFilter) > 3 { - return errfmt.Errorf("too many file-write path filters given") - } - for _, filter := range c.Capture.FileWrite.PathFilter { - if len(filter) > 50 { - return errfmt.Errorf("the length of a path filter is limited to 50 characters: %s", filter) + if c.Capture != nil { + if len(c.Capture.FileWrite.PathFilter) > 3 { + return errfmt.Errorf("too many file-write path filters given") } - } - if len(c.Capture.FileRead.PathFilter) > 3 { - return errfmt.Errorf("too many file-read path filters given") - } - for _, filter := range c.Capture.FileWrite.PathFilter { - if len(filter) > 50 { - return errfmt.Errorf("the length of a path filter is limited to 50 characters: %s", filter) + for _, filter := range c.Capture.FileWrite.PathFilter { + if len(filter) > 50 { + return errfmt.Errorf( + "the length of a path filter is limited to 50 characters: %s", + filter, + ) + } + } + if 
len(c.Capture.FileRead.PathFilter) > 3 { + return errfmt.Errorf("too many file-read path filters given") + } + for _, filter := range c.Capture.FileWrite.PathFilter { + if len(filter) > 50 { + return errfmt.Errorf( + "the length of a path filter is limited to 50 characters: %s", + filter, + ) + } } - } - - // BPF - if c.BPFObjBytes == nil { - return errfmt.Errorf("nil bpf object in memory") } return nil @@ -185,3 +188,8 @@ type PrinterConfig struct { ContainerMode ContainerMode RelativeTS bool } + +type ProducerConfig struct { + Kind string + InputSource io.Reader +} diff --git a/pkg/ebpf/c/tracee.bpf.c b/pkg/ebpf/c/tracee.bpf.c index 8b7eccaeb171..3f86e0ed5d54 100644 --- a/pkg/ebpf/c/tracee.bpf.c +++ b/pkg/ebpf/c/tracee.bpf.c @@ -671,42 +671,36 @@ int tracepoint__sched__sched_process_fork(struct bpf_raw_tracepoint_args *ctx) save_to_submit_buf(&p.event->args_buf, (void *) &child_ns_pid, sizeof(int), 8); save_to_submit_buf(&p.event->args_buf, (void *) &child_start_time, sizeof(u64), 9); - // Process tree information (if needed). - if (p.config->options & OPT_FORK_PROCTREE) { - // Both, the thread group leader and the "up_parent" (the first process, not lwp, found - // as a parent of the child in the hierarchy), are needed by the userland process tree. - // The userland process tree default source of events is the signal events, but there is - // an option to use regular event for maintaining it as well (and it is needed for some - // situatins). These arguments will always be removed by userland event processors. - struct task_struct *leader = get_leader_task(child); - struct task_struct *up_parent = get_leader_task(get_parent_task(leader)); - - // Up Parent information: Go up in hierarchy until parent is process. 
- u64 up_parent_start_time = get_task_start_time(up_parent); - int up_parent_pid = get_task_host_tgid(up_parent); - int up_parent_tid = get_task_host_pid(up_parent); - int up_parent_ns_pid = get_task_ns_tgid(up_parent); - int up_parent_ns_tid = get_task_ns_pid(up_parent); - // Leader information. - u64 leader_start_time = get_task_start_time(leader); - int leader_pid = get_task_host_tgid(leader); - int leader_tid = get_task_host_pid(leader); - int leader_ns_pid = get_task_ns_tgid(leader); - int leader_ns_tid = get_task_ns_pid(leader); - - // Up Parent: always a process (might be the same as Parent if parent is a process). - save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_tid, sizeof(int), 10); - save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_ns_tid, sizeof(int), 11); - save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_pid, sizeof(int), 12); - save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_ns_pid, sizeof(int), 13); - save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_start_time, sizeof(u64), 14); - // Leader: always a process (might be the same as the Child if child is a process). - save_to_submit_buf(&p.event->args_buf, (void *) &leader_tid, sizeof(int), 15); - save_to_submit_buf(&p.event->args_buf, (void *) &leader_ns_tid, sizeof(int), 16); - save_to_submit_buf(&p.event->args_buf, (void *) &leader_pid, sizeof(int), 17); - save_to_submit_buf(&p.event->args_buf, (void *) &leader_ns_pid, sizeof(int), 18); - save_to_submit_buf(&p.event->args_buf, (void *) &leader_start_time, sizeof(u64), 19); - } + struct task_struct *leader = get_leader_task(child); + // "up_parent" is the first process, not lwp, found + // as a parent of the child in the hierarchy. + struct task_struct *up_parent = get_leader_task(get_parent_task(leader)); + + // Up Parent information: Go up in hierarchy until parent is process. 
+ u64 up_parent_start_time = get_task_start_time(up_parent); + int up_parent_pid = get_task_host_tgid(up_parent); + int up_parent_tid = get_task_host_pid(up_parent); + int up_parent_ns_pid = get_task_ns_tgid(up_parent); + int up_parent_ns_tid = get_task_ns_pid(up_parent); + // Leader information. + u64 leader_start_time = get_task_start_time(leader); + int leader_pid = get_task_host_tgid(leader); + int leader_tid = get_task_host_pid(leader); + int leader_ns_pid = get_task_ns_tgid(leader); + int leader_ns_tid = get_task_ns_pid(leader); + + // Up Parent: always a process (might be the same as Parent if parent is a process). + save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_tid, sizeof(int), 10); + save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_ns_tid, sizeof(int), 11); + save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_pid, sizeof(int), 12); + save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_ns_pid, sizeof(int), 13); + save_to_submit_buf(&p.event->args_buf, (void *) &up_parent_start_time, sizeof(u64), 14); + // Leader: always a process (might be the same as the Child if child is a process). 
+ save_to_submit_buf(&p.event->args_buf, (void *) &leader_tid, sizeof(int), 15); + save_to_submit_buf(&p.event->args_buf, (void *) &leader_ns_tid, sizeof(int), 16); + save_to_submit_buf(&p.event->args_buf, (void *) &leader_pid, sizeof(int), 17); + save_to_submit_buf(&p.event->args_buf, (void *) &leader_ns_pid, sizeof(int), 18); + save_to_submit_buf(&p.event->args_buf, (void *) &leader_start_time, sizeof(u64), 19); // Submit events_perf_submit(&p, 0); diff --git a/pkg/ebpf/events_pipeline.go b/pkg/ebpf/events_pipeline.go index 583f6060d37d..4003a2c50b7e 100644 --- a/pkg/ebpf/events_pipeline.go +++ b/pkg/ebpf/events_pipeline.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/binary" + "io" "strconv" "sync" "unsafe" @@ -31,17 +32,25 @@ func (t *Tracee) handleEvents(ctx context.Context, initialized chan<- struct{}) defer logger.Debugw("Stopped handleEvents goroutine") var errcList []<-chan error + var eventsChan <-chan *trace.Event + var errc <-chan error - // Decode stage: events are read from the perf buffer and decoded into trace.Event type. + if t.producer != nil { + // Produce stage: events are produced from input file - eventsChan, errc := t.decodeEvents(ctx, t.eventsChannel) - errcList = append(errcList, errc) - - // Cache stage: events go through a caching function. + eventsChan, errc = t.produceEvents(ctx) + } else { + // Decode stage: events are read from the perf buffer and decoded into trace.Event type. - if t.config.Cache != nil { - eventsChan, errc = t.queueEvents(ctx, eventsChan) + eventsChan, errc = t.decodeEvents(ctx, t.eventsChannel) errcList = append(errcList, errc) + + // Cache stage: events go through a caching function. + + if t.config.Cache != nil { + eventsChan, errc = t.queueEvents(ctx, eventsChan) + errcList = append(errcList, errc) + } } // Sort stage: events go through a sorting function. 
@@ -56,18 +65,20 @@ func (t *Tracee) handleEvents(ctx context.Context, initialized chan<- struct{}) eventsChan, errc = t.processEvents(ctx, eventsChan) errcList = append(errcList, errc) - // Enrichment stage: container events are enriched with additional runtime data. + if t.producer == nil { + // Enrichment stage: container events are enriched with additional runtime data. + if !t.config.NoContainersEnrich { // TODO: remove safe-guard soon. + eventsChan, errc = t.enrichContainerEvents(ctx, eventsChan) + errcList = append(errcList, errc) + } + + // Derive events stage: events go through a derivation function. + // TODO: Figure how to maybe derive events in analyze mode without conflicts - if !t.config.NoContainersEnrich { // TODO: remove safe-guard soon. - eventsChan, errc = t.enrichContainerEvents(ctx, eventsChan) + eventsChan, errc = t.deriveEvents(ctx, eventsChan) errcList = append(errcList, errc) } - // Derive events stage: events go through a derivation function. - - eventsChan, errc = t.deriveEvents(ctx, eventsChan) - errcList = append(errcList, errc) - // Engine events stage: events go through the signatures engine for detection. 
if t.config.EngineConfig.Enabled { @@ -645,6 +656,60 @@ func (t *Tracee) sinkEvents(ctx context.Context, in <-chan *trace.Event) <-chan return errc } +func (t *Tracee) produceEvents(ctx context.Context) ( + <-chan *trace.Event, <-chan error, +) { + out := make(chan *trace.Event) + errc := make(chan error, 1) + + go func() { + defer close(out) + defer close(errc) + + for { + select { + case <-ctx.Done(): + case <-t.producer.Done(): + return + default: + e, err := t.producer.Produce() + if err != nil { + if err == io.EOF { + // TODO: Signal context done + return + } + logger.Fatalw("Error with events producer", "err", err) + } + e.MatchedPoliciesKernel = policy.PolicyAll + policiesVersion := uint16(1) // Use default policies min value + currentPolicies, err := policy.Snapshots().GetLast() + if err != nil { + logger.Errorw("Error getting last policy snapshot", "err", err) + } else { + policiesVersion = currentPolicies.Version() + } + e.PoliciesVersion = policiesVersion + // If there aren't any policies that need filtering in userland, tracee **may** skip + // this event, as long as there aren't any derivatives or signatures that depend on it. + // Some base events (derivative and signatures) might not have set related policy bit, + // thus the need to continue with those within the pipeline. 
+ if t.matchPolicies(&e) == 0 { + _, hasDerivation := t.eventDerivations[events.ID(e.EventID)] + _, hasSignature := t.eventSignatures[events.ID(e.EventID)] + + if !hasDerivation && !hasSignature { + _ = t.stats.EventsFiltered.Increment() + continue + } + } + out <- &e + } + } + }() + + return out, errc +} + // getStackAddresses returns the stack addresses for a given StackID func (t *Tracee) getStackAddresses(stackID uint32) []uint64 { stackAddresses := make([]uint64, maxStackDepth) diff --git a/pkg/ebpf/processor.go b/pkg/ebpf/processor.go index 72a7ffb9bfd1..2ee0921d7d18 100644 --- a/pkg/ebpf/processor.go +++ b/pkg/ebpf/processor.go @@ -93,8 +93,6 @@ func (t *Tracee) registerEventProcessors() { if t.config.ProcTree.Source != proctree.SourceNone { t.RegisterEventProcessor(events.All, t.procTreeAddBinInfo) } - // Processors regitered even if process tree source is disabled. - t.RegisterEventProcessor(events.SchedProcessFork, t.procTreeForkRemoveArgs) // // DNS Cache Processors @@ -126,9 +124,12 @@ func (t *Tracee) registerEventProcessors() { // Event Timestamps Normalization Processors // - // Convert all time relate args to nanoseconds since epoch. - // NOTE: Make sure to convert time related args (of your event) in here. - t.RegisterEventProcessor(events.SchedProcessFork, t.processSchedProcessFork) + t.RegisterEventProcessor(events.InitTraceeData, t.processInitTraceeDataEvent) + if t.producer == nil { // TODO: Remove this check once producer is able to revert arguments timestamps normalization + // Convert all time relate args to nanoseconds since epoch. + // NOTE: Make sure to convert time related args (of your event) in here. 
+ t.RegisterEventProcessor(events.SchedProcessFork, t.processSchedProcessFork) + } t.RegisterEventProcessor(events.All, t.normalizeEventCtxTimes) } diff --git a/pkg/ebpf/processor_funcs.go b/pkg/ebpf/processor_funcs.go index d5f309d3517c..b533e033303f 100644 --- a/pkg/ebpf/processor_funcs.go +++ b/pkg/ebpf/processor_funcs.go @@ -25,7 +25,7 @@ import ( // processWriteEvent processes a write event by indexing the written file. func (t *Tracee) processWriteEvent(event *trace.Event) error { // only capture written files - if !t.config.Capture.FileWrite.Capture { + if t.config.Capture == nil || !t.config.Capture.FileWrite.Capture { return nil } filePath, err := parse.ArgVal[string](event.Args, "pathname") @@ -125,7 +125,7 @@ func (t *Tracee) processSchedProcessExec(event *trace.Event) error { } // capture executed files - if t.config.Capture.Exec || t.config.Output.CalcHashes != config.CalcHashesNone { + if t.config.Capture != nil && t.config.Capture.Exec || t.config.Output.CalcHashes != config.CalcHashesNone { filePath, err := parse.ArgVal[string](event.Args, "pathname") if err != nil { return errfmt.Errorf("error parsing sched_process_exec args: %v", err) @@ -412,6 +412,16 @@ func (t *Tracee) normalizeEventArgTime(event *trace.Event, argName string) error return nil } +// processInitTraceeDataEvent processes the init_tracee_data event to get system info +// This processing function is must for analyze mode to work well +func (t *Tracee) processInitTraceeDataEvent(event *trace.Event) error { + var err error + if t.bootTime == 0 { + t.bootTime, err = parse.ArgVal[uint64](event.Args, "boot_time") + } + return err +} + // addHashArg calculate file hash (in a best-effort efficiency manner) and add it as an argument func (t *Tracee) addHashArg(event *trace.Event, fileKey *filehash.Key) error { // Currently Tracee does not support hash calculation of memfd files diff --git a/pkg/ebpf/processor_proctree.go b/pkg/ebpf/processor_proctree.go index 6e5a720fc24e..84c60b4dc5fb 
100644 --- a/pkg/ebpf/processor_proctree.go +++ b/pkg/ebpf/processor_proctree.go @@ -98,39 +98,6 @@ func (t *Tracee) procTreeForkProcessor(event *trace.Event) error { ) } -// procTreeForkRemoveArgs removes arguments needed for the process tree only (when source is events). -func (t *Tracee) procTreeForkRemoveArgs(event *trace.Event) error { - argsToRemove := []string{ - "up_parent_tid", - "up_parent_ns_tid", - "up_parent_pid", - "up_parent_ns_pid", - "up_parent_start_time", - "leader_tid", - "leader_ns_tid", - "leader_pid", - "leader_ns_pid", - "leader_start_time", - } - - m := make(map[string]trace.Argument) - for _, arg := range event.Args { - m[arg.Name] = arg - } - - for _, argName := range argsToRemove { - delete(m, argName) - } - - event.Args = make([]trace.Argument, 0, len(m)) - for _, arg := range m { - event.Args = append(event.Args, arg) - } - event.ArgsNum = len(event.Args) - - return nil -} - // procTreeExecProcessor handles process exec events. func (t *Tracee) procTreeExecProcessor(event *trace.Event) error { var errs []error diff --git a/pkg/ebpf/signature_engine.go b/pkg/ebpf/signature_engine.go index 8bbaff96a567..3c469eee5a69 100644 --- a/pkg/ebpf/signature_engine.go +++ b/pkg/ebpf/signature_engine.go @@ -2,6 +2,7 @@ package ebpf import ( "context" + "sync" "github.com/aquasecurity/tracee/pkg/containers" "github.com/aquasecurity/tracee/pkg/dnscache" @@ -53,6 +54,10 @@ func (t *Tracee) engineEvents(ctx context.Context, in <-chan *trace.Event) (<-ch go t.sigEngine.Start(ctx) + // Use wait group to close output channels only upon the end of both internal goroutines + wg := sync.WaitGroup{} + wg.Add(2) + // Create a function for feeding the engine with an event feedFunc := func(event *trace.Event) { if event == nil { @@ -87,14 +92,15 @@ func (t *Tracee) engineEvents(ctx context.Context, in <-chan *trace.Event) (<-ch // and converting detect.Finding into trace.Event go func() { - defer close(out) - defer close(errc) defer close(engineInput) - defer 
close(engineOutput) + defer wg.Done() for { select { - case event := <-in: + case event, ok := <-in: + if !ok { + return + } feedFunc(event) case event := <-engineOutputEvents: feedFunc(event) @@ -105,10 +111,11 @@ func (t *Tracee) engineEvents(ctx context.Context, in <-chan *trace.Event) (<-ch }() go func() { + defer wg.Done() for { select { - case finding := <-engineOutput: - if finding == nil { + case finding, ok := <-engineOutput: + if !ok || finding == nil{ return // channel is closed } if finding.Event.Payload == nil { @@ -133,6 +140,12 @@ func (t *Tracee) engineEvents(ctx context.Context, in <-chan *trace.Event) (<-ch } }() + go func() { + wg.Wait() + close(out) + close(errc) + }() + return out, errc } @@ -141,7 +154,9 @@ func (t *Tracee) PrepareBuiltinDataSources() []detect.DataSource { datasources := []detect.DataSource{} // Containers Data Source - datasources = append(datasources, containers.NewDataSource(t.containers)) + if t.containers != nil { + datasources = append(datasources, containers.NewDataSource(t.containers)) + } // DNS Data Source if t.config.DNSCacheConfig.Enable { diff --git a/pkg/ebpf/tracee.go b/pkg/ebpf/tracee.go index 8cb378bc4c58..4ba0c9bc0810 100644 --- a/pkg/ebpf/tracee.go +++ b/pkg/ebpf/tracee.go @@ -38,6 +38,7 @@ import ( "github.com/aquasecurity/tracee/pkg/pcaps" "github.com/aquasecurity/tracee/pkg/policy" "github.com/aquasecurity/tracee/pkg/proctree" + "github.com/aquasecurity/tracee/pkg/producer" "github.com/aquasecurity/tracee/pkg/signatures/engine" "github.com/aquasecurity/tracee/pkg/streams" "github.com/aquasecurity/tracee/pkg/utils" @@ -121,6 +122,8 @@ type Tracee struct { policyManager *policyManager // The dependencies of events used by Tracee eventsDependencies *dependencies.Manager + // producer produce events in analyze mode instead of eBPF programs + producer producer.EventsProducer } func (t *Tracee) Stats() *metrics.Stats { @@ -249,14 +252,6 @@ func New(cfg config.Config) (*Tracee, error) { 
t.removeEventFromState(node.GetID()) }) - // Initialize capabilities rings soon - - err = capabilities.Initialize(t.config.Capabilities.BypassCaps) - if err != nil { - return t, errfmt.WrapError(err) - } - caps := capabilities.GetInstance() - // Initialize events state with mandatory events (TODO: review this need for sched exec) t.chooseEvent(events.SchedProcessFork, events.EventState{}) @@ -270,10 +265,11 @@ func New(cfg config.Config) (*Tracee, error) { // Control Plane Process Tree Events + processTreeEvents := []events.ID{events.SchedProcessFork, events.SchedProcessExec, events.SchedProcessExit} pipeEvts := func() { - t.chooseEvent(events.SchedProcessFork, policy.AlwaysSubmit) - t.chooseEvent(events.SchedProcessExec, policy.AlwaysSubmit) - t.chooseEvent(events.SchedProcessExit, policy.AlwaysSubmit) + for _, id := range processTreeEvents { + t.chooseEvent(id, policy.AlwaysSubmit) + } } signalEvts := func() { t.chooseEvent(events.SignalSchedProcessFork, policy.AlwaysSubmit) @@ -298,9 +294,10 @@ func New(cfg config.Config) (*Tracee, error) { } // Pseudo events added by capture (if enabled by the user) - - for eventID, eCfg := range GetCaptureEventsList(cfg) { - t.chooseEvent(eventID, eCfg) + if t.config.Capture != nil { + for eventID, eCfg := range GetCaptureEventsList(cfg) { + t.chooseEvent(eventID, eCfg) + } } // Events chosen by the user @@ -323,25 +320,72 @@ func New(cfg config.Config) (*Tracee, error) { } } + // Start event triggering logic context + + t.triggerContexts = trigger.NewContext() + + if t.config.MaxPidsCache == 0 { + t.config.MaxPidsCache = 5 // TODO: configure this ? never set, default = 5 + } + + return t, nil + // Update capabilities rings with all events dependencies + // Start event triggering logic context - // TODO: extract this to a function to be called from here and from - // policies changes. 
- for id := range t.eventsState { - if !events.Core.IsDefined(id) { - return t, errfmt.Errorf("event %d is not defined", id) - } - depsNode, ok := t.eventsDependencies.GetEvent(id) - if ok { - deps := depsNode.GetDependencies() - evtCaps := deps.GetCapabilities() - err = caps.BaseRingAdd(evtCaps.GetBase()...) - if err != nil { - return t, errfmt.WrapError(err) + t.triggerContexts = trigger.NewContext() + + if t.config.MaxPidsCache == 0 { + t.config.MaxPidsCache = 5 // TODO: configure this ? never set, default = 5 + } + + return t, nil +} + +// Init initialize tracee instance and it's various subsystems, potentially +// performing external system operations to initialize them. NOTE: any +// initialization logic, especially one that causes side effects, should go +// here and not New(). +func (t *Tracee) Init(ctx gocontext.Context) error { + // In this stage we expect to either have a producer or an eBPF object to load + if t.producer == nil && t.config.BPFObjBytes == nil { + return errfmt.Errorf("nil bpf object in memory") + } + + // Initialize needed values + + // Initialize capabilities rings soon + + err := capabilities.Initialize(t.config.Capabilities.BypassCaps) + if err != nil { + return errfmt.WrapError(err) + } + caps := capabilities.GetInstance() + + // Handle all essential events dependencies + + // We need to update capabilities only for events produced using eBPF + if t.producer == nil { + // Update capabilities rings with all events dependencies + + // TODO: extract this to a function to be called from here and from + // policies changes. + for id := range t.eventsState { + if !events.Core.IsDefined(id) { + return errfmt.Errorf("event %d is not defined", id) } - err = caps.BaseRingAdd(evtCaps.GetEBPF()...) - if err != nil { - return t, errfmt.WrapError(err) + depsNode, ok := t.eventsDependencies.GetEvent(id) + if ok { + deps := depsNode.GetDependencies() + evtCaps := deps.GetCapabilities() + err := caps.BaseRingAdd(evtCaps.GetBase()...) 
+ if err != nil { + return errfmt.WrapError(err) + } + err = caps.BaseRingAdd(evtCaps.GetEBPF()...) + if err != nil { + return errfmt.WrapError(err) + } } } } @@ -350,38 +394,79 @@ func New(cfg config.Config) (*Tracee, error) { capsToAdd, err := capabilities.ReqByString(t.config.Capabilities.AddCaps...) if err != nil { - return t, errfmt.WrapError(err) + return errfmt.WrapError(err) } err = caps.BaseRingAdd(capsToAdd...) if err != nil { - return t, errfmt.WrapError(err) + return errfmt.WrapError(err) } capsToDrop, err := capabilities.ReqByString(t.config.Capabilities.DropCaps...) if err != nil { - return t, errfmt.WrapError(err) + return errfmt.WrapError(err) } err = caps.BaseRingRemove(capsToDrop...) if err != nil { - return t, errfmt.WrapError(err) + return errfmt.WrapError(err) } // Register default event processors t.registerEventProcessors() - // Start event triggering logic context + // Initialize the pids per mount ns to cache - t.triggerContexts = trigger.NewContext() + t.pidsInMntns.Init(t.config.MaxPidsCache) - return t, nil + // Initialize events pool + + t.eventsPool = &sync.Pool{ + New: func() interface{} { + return &trace.Event{} + }, + } + + // Initialize events sorting (pipeline step) + + if t.config.Output.EventsSorting { + t.eventsSorter, err = sorting.InitEventSorter() + if err != nil { + return errfmt.WrapError(err) + } + } + + // Initialize events parameter types map + t.eventsParamTypes = make(map[events.ID][]bufferdecoder.ArgType) + for _, eventDefinition := range events.Core.GetDefinitions() { + id := eventDefinition.GetID() + params := eventDefinition.GetParams() + for _, param := range params { + t.eventsParamTypes[id] = append(t.eventsParamTypes[id], bufferdecoder.GetParamType(param.Type)) + } + } + + // Initialize Process Tree (if enabled) + + if t.config.ProcTree.Source != proctree.SourceNone { + t.processTree, err = proctree.NewProcessTree(ctx, t.config.ProcTree) + if err != nil { + return errfmt.WrapError(err) + } + } + + if 
t.producer == nil { + err = t.initBPFProducing(ctx) + if err != nil { + return err + } + } + + return nil } -// Init initialize tracee instance and it's various subsystems, potentially -// performing external system operations to initialize them. NOTE: any -// initialization logic, especially one that causes side effects, should go -// here and not New(). -func (t *Tracee) Init(ctx gocontext.Context) error { +// initBPFProducing includes all the initializations needed when producing events through eBPF. +// Not all the things are related directly to the eBPF, as they also affect the pipeline and environment. +func (t *Tracee) initBPFProducing(ctx gocontext.Context) error { var err error // Init kernel symbols map @@ -399,16 +484,8 @@ func (t *Tracee) Init(ctx gocontext.Context) error { t.validateKallsymsDependencies() // disable events w/ missing ksyms dependencies - // Initialize buckets cache - var mntNSProcs map[int]int - if t.config.MaxPidsCache == 0 { - t.config.MaxPidsCache = 5 // TODO: configure this ? 
never set, default = 5 - } - - t.pidsInMntns.Init(t.config.MaxPidsCache) - err = capabilities.GetInstance().Specific( func() error { mntNSProcs, err = proc.GetMountNSFirstProcesses() @@ -425,15 +502,6 @@ func (t *Tracee) Init(ctx gocontext.Context) error { logger.Debugw("Initializing buckets cache", "error", errfmt.WrapError(err)) } - // Initialize Process Tree (if enabled) - - if t.config.ProcTree.Source != proctree.SourceNone { - t.processTree, err = proctree.NewProcessTree(ctx, t.config.ProcTree) - if err != nil { - return errfmt.WrapError(err) - } - } - // Initialize cgroups filesystems t.cgroups, err = cgroup.NewCgroups() @@ -477,16 +545,6 @@ func (t *Tracee) Init(ctx gocontext.Context) error { return errfmt.Errorf("error initializing event derivation map: %v", err) } - // Initialize events parameter types map - t.eventsParamTypes = make(map[events.ID][]bufferdecoder.ArgType) - for _, eventDefinition := range events.Core.GetDefinitions() { - id := eventDefinition.GetID() - params := eventDefinition.GetParams() - for _, param := range params { - t.eventsParamTypes[id] = append(t.eventsParamTypes[id], bufferdecoder.GetParamType(param.Type)) - } - } - // Initialize eBPF programs and maps err = capabilities.GetInstance().EBPF( @@ -546,23 +604,6 @@ func (t *Tracee) Init(ctx gocontext.Context) error { } t.FDArgPathMap = fdArgPathMap - // Initialize events sorting (pipeline step) - - if t.config.Output.EventsSorting { - t.eventsSorter, err = sorting.InitEventSorter() - if err != nil { - return errfmt.WrapError(err) - } - } - - // Initialize events pool - - t.eventsPool = &sync.Pool{ - New: func() interface{} { - return &trace.Event{} - }, - } - // Initialize times t.startTime = uint64(utils.GetStartTimeNS()) @@ -808,7 +849,6 @@ const ( optTranslateFDFilePath optCaptureBpf optCaptureFileRead - optForkProcTree ) func (t *Tracee) getOptionsConfig() uint32 { @@ -842,10 +882,6 @@ func (t *Tracee) getOptionsConfig() uint32 { if t.config.Output.ParseArgumentsFDs { cOptVal = 
cOptVal | optTranslateFDFilePath } - switch t.config.ProcTree.Source { - case proctree.SourceBoth, proctree.SourceEvents: - cOptVal = cOptVal | optForkProcTree // tell sched_process_fork to be prolix - } return cOptVal } @@ -1244,82 +1280,99 @@ const pollTimeout int = 300 // Run starts the trace. it will run until ctx is cancelled func (t *Tracee) Run(ctx gocontext.Context) error { - // Some events need initialization before the perf buffers are polled + // eBPF initialization used when eBPF producer is used + if t.producer == nil { + // Some events need initialization before the perf buffers are polled - go t.hookedSyscallTableRoutine(ctx) + go t.hookedSyscallTableRoutine(ctx) - t.triggerSeqOpsIntegrityCheck(trace.Event{}) - errs := t.triggerMemDump(trace.Event{}) - for _, err := range errs { - logger.Warnw("Memory dump", "error", err) - } + t.triggerSeqOpsIntegrityCheck(trace.Event{}) + errs := t.triggerMemDump(trace.Event{}) + for _, err := range errs { + logger.Warnw("Memory dump", "error", err) + } + + go t.lkmSeekerRoutine(ctx) - go t.lkmSeekerRoutine(ctx) + // Start control plane + t.controlPlane.Start() + go t.controlPlane.Run(ctx) - // Start control plane - t.controlPlane.Start() - go t.controlPlane.Run(ctx) + // Main event loop (polling events perf buffer) - // Main event loop (polling events perf buffer) + t.eventsPerfMap.Poll(pollTimeout) - t.eventsPerfMap.Poll(pollTimeout) + go t.processLostEvents() // termination signaled by closing t.done + } pipelineReady := make(chan struct{}, 1) - go t.processLostEvents() // termination signaled by closing t.done - go t.handleEvents(ctx, pipelineReady) + pipelineDone := make(chan struct{}) + go func() { + t.handleEvents(ctx, pipelineReady) + close(pipelineDone) + }() - // Parallel perf buffer with file writes events + if t.producer == nil { + // Parallel perf buffer with file writes events - if t.config.BlobPerfBufferSize > 0 { - t.fileWrPerfMap.Poll(pollTimeout) - go t.handleFileCaptures(ctx) - } + if 
t.config.BlobPerfBufferSize > 0 { + t.fileWrPerfMap.Poll(pollTimeout) + go t.handleFileCaptures(ctx) + } - // Network capture perf buffer (similar to regular pipeline) + // Network capture perf buffer (similar to regular pipeline) - if pcaps.PcapsEnabled(t.config.Capture.Net) { - t.netCapPerfMap.Poll(pollTimeout) - go t.handleNetCaptureEvents(ctx) - } + if pcaps.PcapsEnabled(t.config.Capture.Net) { + t.netCapPerfMap.Poll(pollTimeout) + go t.handleNetCaptureEvents(ctx) + } - // Logging perf buffer + // Logging perf buffer - t.bpfLogsPerfMap.Poll(pollTimeout) - go t.processBPFLogs(ctx) + t.bpfLogsPerfMap.Poll(pollTimeout) + go t.processBPFLogs(ctx) + } // Management <-pipelineReady t.running.Store(true) // set running state after writing pid file t.ready(ctx) // executes ready callback, non blocking - <-ctx.Done() // block until ctx is cancelled elsewhere + select { + case <-ctx.Done(): // block until ctx is cancelled elsewhere + case <-pipelineDone: + break + } - // Close perf buffers + // eBPF closure should only occur with eBPF producer + if t.producer == nil { + // Close perf buffers - t.eventsPerfMap.Close() - if t.config.BlobPerfBufferSize > 0 { - t.fileWrPerfMap.Close() - } - if pcaps.PcapsEnabled(t.config.Capture.Net) { - t.netCapPerfMap.Close() - } - t.bpfLogsPerfMap.Close() + t.eventsPerfMap.Close() + if t.config.BlobPerfBufferSize > 0 { + t.fileWrPerfMap.Close() + } + if pcaps.PcapsEnabled(t.config.Capture.Net) { + t.netCapPerfMap.Close() + } + t.bpfLogsPerfMap.Close() - // TODO: move logic below somewhere else (related to file writes) + // TODO: move logic below somewhere else (related to file writes) - // record index of written files - if t.config.Capture.FileWrite.Capture { - err := updateCaptureMapFile(t.OutDir, "written_files", t.writtenFiles, t.config.Capture.FileWrite) - if err != nil { - return err + // record index of written files + if t.config.Capture.FileWrite.Capture { + err := updateCaptureMapFile(t.OutDir, "written_files", t.writtenFiles, 
t.config.Capture.FileWrite) + if err != nil { + return err + } } - } - // record index of read files - if t.config.Capture.FileRead.Capture { - err := updateCaptureMapFile(t.OutDir, "read_files", t.readFiles, t.config.Capture.FileRead) - if err != nil { - return err + // record index of read files + if t.config.Capture.FileRead.Capture { + err := updateCaptureMapFile(t.OutDir, "read_files", t.readFiles, t.config.Capture.FileRead) + if err != nil { + return err + } } } @@ -1385,8 +1438,10 @@ func (t *Tracee) Close() { logger.Errorw("failed to clean containers module when closing tracee", "err", err) } } - if err := t.cgroups.Destroy(); err != nil { - logger.Errorw("Cgroups destroy", "error", err) + if t.cgroups != nil { + if err := t.cgroups.Destroy(); err != nil { + logger.Errorw("Cgroups destroy", "error", err) + } } // set 'running' to false and close 'done' channel only after attempting to close all resources @@ -1507,12 +1562,18 @@ func (t *Tracee) invokeInitEvents(out chan *trace.Event) { // Initial namespace events + matchedPolicies = policiesMatch(t.eventsState[events.InitTraceeData]) + if matchedPolicies > 0 { + traceeDataEvent := events.InitTraceeDataEvent(t.bootTime, t.startTime) + setMatchedPolicies(&traceeDataEvent, matchedPolicies, t.config.Policies) + out <- &traceeDataEvent + } + matchedPolicies = policiesMatch(t.eventsState[events.InitNamespaces]) if matchedPolicies > 0 { systemInfoEvent := events.InitNamespacesEvent() setMatchedPolicies(&systemInfoEvent, matchedPolicies, t.config.Policies) out <- &systemInfoEvent - _ = t.stats.EventCount.Increment() } // Initial existing containers events (1 event per container) @@ -1524,7 +1585,6 @@ func (t *Tracee) invokeInitEvents(out chan *trace.Event) { event := &(existingContainerEvents[i]) setMatchedPolicies(event, matchedPolicies, t.config.Policies) out <- event - _ = t.stats.EventCount.Increment() } } @@ -1772,6 +1832,10 @@ func (t *Tracee) Unsubscribe(s *streams.Stream) { t.streamsManager.Unsubscribe(s) } 
+func (t *Tracee) SetProducer(eventsProducer producer.EventsProducer) { + t.producer = eventsProducer +} + func (t *Tracee) EnableEvent(eventName string) error { id, found := events.Core.GetDefinitionIDByName(eventName) if !found { diff --git a/pkg/events/core.go b/pkg/events/core.go index bc8dcd9d96b1..965a543c89e6 100644 --- a/pkg/events/core.go +++ b/pkg/events/core.go @@ -142,6 +142,7 @@ const ( SymbolsCollision HiddenKernelModule FtraceHook + InitTraceeData MaxUserSpace ) @@ -11147,7 +11148,7 @@ var CoreEvents = map[ID]Definition{ {handle: probes.SchedProcessFork, required: true}, }, }, - sets: []string{}, + sets: []string{"analyze_essentials"}, params: []trace.ArgMeta{ // Real Parent {Type: "int", Name: "parent_tid"}, @@ -11162,7 +11163,6 @@ var CoreEvents = map[ID]Definition{ {Type: "int", Name: "child_ns_pid"}, {Type: "unsigned long", Name: "start_time"}, // child_start_time // Arguments set by OPT_PROCESS_FORK (when process tree source is enabled for fork events). - // These arguments are always removed after process tree processing. // Up Parent (Up in hierarchy until parent is a process and not a lwp) {Type: "int", Name: "up_parent_tid"}, {Type: "int", Name: "up_parent_ns_tid"}, @@ -11202,7 +11202,7 @@ var CoreEvents = map[ID]Definition{ }, }, }, - sets: []string{"default", "proc"}, + sets: []string{"default", "proc", "analyze_essentials"}, params: []trace.ArgMeta{ {Type: "const char*", Name: "cmdpath"}, {Type: "const char*", Name: "pathname"}, @@ -11233,7 +11233,7 @@ var CoreEvents = map[ID]Definition{ {handle: probes.SchedProcessFree, required: true}, }, }, - sets: []string{"proc", "proc_life"}, + sets: []string{"proc", "proc_life", "analyze_essentials"}, params: []trace.ArgMeta{ {Type: "long", Name: "exit_code"}, // The field value represents that all threads exited at the event time. 
@@ -11907,6 +11907,18 @@ var CoreEvents = map[ID]Definition{ {Type: "u32", Name: "uts"}, }, }, + InitTraceeData: { + id: InitTraceeData, + id32Bit: Sys32Undefined, + name: "init_tracee_info", + version: NewVersion(1, 0, 0), + sets: []string{"analyze_essentials"}, + dependencies: Dependencies{}, + params: []trace.ArgMeta{ + {Type: "u64", Name: "boot_time"}, + {Type: "u64", Name: "start_time"}, + }, + }, SocketDup: { id: SocketDup, id32Bit: Sys32Undefined, diff --git a/pkg/events/ftrace.go b/pkg/events/ftrace.go index c8a7164232ce..7e283d05b8b7 100644 --- a/pkg/events/ftrace.go +++ b/pkg/events/ftrace.go @@ -192,7 +192,6 @@ func checkFtraceHooks(eventsCounter counter.Counter, out chan *trace.Event, base event.ArgsNum = len(args) out <- &event - _ = eventsCounter.Increment() } return nil diff --git a/pkg/events/usermode.go b/pkg/events/usermode.go index 58ac17f7bf8d..36c3bd23e744 100644 --- a/pkg/events/usermode.go +++ b/pkg/events/usermode.go @@ -49,6 +49,27 @@ func InitNamespacesEvent() trace.Event { return initNamespacesEvent } +// InitTraceeDataEvent exports data related to Tracee's initialization +func InitTraceeDataEvent(bootTime uint64, startTime uint64) trace.Event { + def := Core.GetDefinitionByID(InitTraceeData) + params := def.GetParams() + args := []trace.Argument{ + {ArgMeta: params[0], Value: bootTime}, + {ArgMeta: params[1], Value: startTime}, + } + + initTraceeDataEvent := trace.Event{ + Timestamp: int(time.Now().UnixNano()), + ProcessName: "tracee-ebpf", + EventID: int(def.GetID()), + EventName: def.GetName(), + ArgsNum: len(args), + Args: args, + } + + return initTraceeDataEvent +} + // getInitNamespaceArguments fetches the namespaces of the init process and // parse them into event arguments. 
func getInitNamespaceArguments() []trace.Argument { diff --git a/pkg/policy/policies.go b/pkg/policy/policies.go index 8c89392365e2..9e6b25618ece 100644 --- a/pkg/policy/policies.go +++ b/pkg/policy/policies.go @@ -18,9 +18,11 @@ const ( PolicyNone = uint64(0) ) -var AlwaysSubmit = events.EventState{ - Submit: PolicyAll, -} +var ( + AlwaysSubmit = events.EventState{Submit: PolicyAll} + AlwaysEmit = events.EventState{Emit: PolicyAll} + AlwaysSubmitAndEmit = events.EventState{Submit: PolicyAll, Emit: PolicyAll} +) type Policies struct { rwmu sync.RWMutex diff --git a/pkg/proctree/proctree.go b/pkg/proctree/proctree.go index fa301d93ecce..0d3b9cb68fd0 100644 --- a/pkg/proctree/proctree.go +++ b/pkg/proctree/proctree.go @@ -78,7 +78,7 @@ type ProcessTree struct { procfsOnce *sync.Once // busy loop debug message throttling ctx context.Context // context for the process tree mutex *sync.RWMutex // mutex for the process tree - procfsQuery bool + procfsQuery bool // should use procfs queries } // NewProcessTree creates a new process tree. 
diff --git a/pkg/producer/json.go b/pkg/producer/json.go
new file mode 100644
index 000000000000..ed2c65a647d2
--- /dev/null
+++ b/pkg/producer/json.go
@@ -0,0 +1,42 @@
+package producer
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/aquasecurity/tracee/types/trace"
+)
+
+// jsonEventProducer reads newline-delimited JSON events from an input stream.
+type jsonEventProducer struct {
+	in   *bufio.Scanner
+	done chan struct{}
+}
+
+func initJsonEventProducer(input io.Reader) *jsonEventProducer {
+	scanner := bufio.NewScanner(input)
+	scanner.Split(bufio.ScanLines)
+	return &jsonEventProducer{in: scanner, done: make(chan struct{})}
+}
+
+// Produce decodes the next event from the stream. It closes the done channel
+// and returns io.EOF once the input is exhausted (or the scanner fails).
+func (j jsonEventProducer) Produce() (trace.Event, error) {
+	if !j.in.Scan() { // if EOF or error close the done channel and return
+		close(j.done)
+		return trace.Event{}, io.EOF
+	}
+
+	var e trace.Event
+	err := json.Unmarshal(j.in.Bytes(), &e)
+	if err != nil {
+		return trace.Event{}, fmt.Errorf("failed to unmarshal event: %w", err)
+	}
+	return e, nil
+}
+
+func (j jsonEventProducer) Done() <-chan struct{} {
+	return j.done
+}
diff --git a/pkg/producer/producer.go b/pkg/producer/producer.go
new file mode 100644
index 000000000000..db15e1bd0f96
--- /dev/null
+++ b/pkg/producer/producer.go
@@ -0,0 +1,44 @@
+package producer
+
+import (
+	"fmt"
+
+	"github.com/aquasecurity/tracee/pkg/config"
+	"github.com/aquasecurity/tracee/pkg/errfmt"
+	"github.com/aquasecurity/tracee/types/trace"
+)
+
+// EventsProducer is a type that is able to generate events
+type EventsProducer interface {
+	// Produce produces a single event.
+	// Return io.EOF for end of events stream.
+	Produce() (trace.Event, error)
+	Done() <-chan struct{}
+}
+
+// New builds an EventsProducer for the configured kind, wrapped with a
+// TimeFixerProducer that normalizes event timestamps.
+// The "ebpf" kind deliberately returns (nil, nil): those events come from
+// the eBPF pipeline rather than from an input producer.
+func New(cfg *config.ProducerConfig) (EventsProducer, error) {
+	kind := cfg.Kind
+
+	if cfg.InputSource == nil {
+		return nil, errfmt.Errorf("input source is not set")
+	}
+
+	var inputProducer EventsProducer
+	switch kind {
+	case "json":
+		inputProducer = initJsonEventProducer(cfg.InputSource)
+	case "ebpf":
+		return nil, nil
+	case "rego":
+		// Not implemented: fail fast instead of wrapping a nil producer,
+		// which would panic on the first Produce call.
+		return nil, fmt.Errorf("rego producer is not supported yet")
+	default:
+		return nil, fmt.Errorf("unsupported producer kind - %s", cfg.Kind)
+	}
+	return InitTimeFixerProducer(inputProducer), nil
+}
diff --git a/pkg/producer/timefixer.go b/pkg/producer/timefixer.go
new file mode 100644
index 000000000000..4dc66875df8e
--- /dev/null
+++ b/pkg/producer/timefixer.go
@@ -0,0 +1,47 @@
+package producer
+
+import (
+	"github.com/aquasecurity/tracee/pkg/events"
+	"github.com/aquasecurity/tracee/pkg/events/parse"
+	"github.com/aquasecurity/tracee/types/trace"
+)
+
+// A decorator producer that is responsible to fix events timestamps so they
+// will match the ones received from the kernel.
+// In practice, it means changing all times from being since epoch to monotonic
+// times (since boot).
+type TimeFixerProducer struct {
+	internalProducer EventsProducer
+	bootTime         int
+}
+
+func InitTimeFixerProducer(producer EventsProducer) *TimeFixerProducer {
+	return &TimeFixerProducer{
+		internalProducer: producer,
+	}
+}
+
+// Produce fetches the next event from the wrapped producer and rebases its
+// timestamp from epoch time to boot-relative time, using the boot time
+// learned from the init_tracee_info event (0 until that event is seen).
+func (tfixer *TimeFixerProducer) Produce() (trace.Event, error) {
+	event, err := tfixer.internalProducer.Produce()
+	if err != nil {
+		// Propagate the error (including io.EOF) so callers can detect
+		// the end of the stream. Swallowing it here would loop forever.
+		return trace.Event{}, err
+	}
+	switch events.ID(event.EventID) {
+	case events.InitTraceeData:
+		bootTime, err := parse.ArgVal[uint64](event.Args, "boot_time")
+		if err != nil {
+			return event, err
+		}
+		tfixer.bootTime = int(bootTime)
+		fallthrough // the init event itself is rebased as well
+	default:
+		event.Timestamp -= tfixer.bootTime
+	}
+	return event, nil
+}
+
+func (tfixer *TimeFixerProducer) Done() <-chan struct{} {
+	return tfixer.internalProducer.Done()
+}
diff --git a/pkg/producer/timefixer_test.go b/pkg/producer/timefixer_test.go
new file mode 100644
index 000000000000..0bc71e1e64af
--- /dev/null
+++ b/pkg/producer/timefixer_test.go
@@ -0,0 +1,105 @@
+package producer_test
+
+import (
+	"io"
+	"testing"
+
+	"github.com/aquasecurity/tracee/pkg/events"
+	"github.com/aquasecurity/tracee/pkg/producer"
+	"github.com/aquasecurity/tracee/types/trace"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// mockProducer replays a fixed list of events and closes done once the
+// last event has been handed out.
+type mockProducer struct {
+	producedEvents []trace.Event
+	currentEvent   int
+	done           chan struct{}
+}
+
+func initMockTimeFixerProducer(eventsToProduce []trace.Event) *mockProducer {
+	return &mockProducer{
+		producedEvents: eventsToProduce,
+		done:           make(chan struct{}),
+	}
+}
+
+func (m *mockProducer) Produce() (trace.Event, error) {
+	index := m.currentEvent
+	m.currentEvent += 1
+	if m.currentEvent == len(m.producedEvents) {
+		// Close (rather than send) so signaling never blocks when no
+		// goroutine is receiving from done.
+		close(m.done)
+	}
+	if index >= len(m.producedEvents) {
+		return trace.Event{}, io.EOF
+	}
+	return m.producedEvents[index], nil
+}
+
+func (m *mockProducer) Done() <-chan struct{} {
+	return m.done
+}
+
+func TestTimeFixerProducer(t *testing.T) {
+	testCases := []struct {
+		name               string
+		events             []trace.Event
+		expectedTimestamps []int
+	}{
+		{
+			name: "no init event",
+			events: []trace.Event{
+				{
+					EventID:   int(events.SchedProcessExec),
+					Timestamp: 1000,
+				},
+			},
+			expectedTimestamps: []int{1000},
+		},
+		{
+			name: "init event",
+			events: []trace.Event{
+				{
+					EventID:   int(events.InitTraceeData),
+					Timestamp: 1000,
+					Args: []trace.Argument{
+						{
+							ArgMeta: trace.ArgMeta{
+								Name: "boot_time",
+								Type: "uint64",
+							},
+							Value: uint64(80),
+						},
+					},
+				},
+				{
+					EventID:   int(events.SchedProcessExec),
+					Timestamp: 1000,
+				},
+			},
+			// Both events are rebased by the learned boot time: 1000 - 80.
+			expectedTimestamps: []int{920, 920},
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			// Create a new TimeFixerProducer over the mocked stream
+			mockProcer := initMockTimeFixerProducer(testCase.events)
+			tfixer := producer.InitTimeFixerProducer(mockProcer)
+
+			for i := 0; ; i++ {
+				// Check the produced events' timestamps
+				event, err := tfixer.Produce()
+				if err == io.EOF {
+					assert.Equal(t, len(testCase.expectedTimestamps), i)
+					break
+				}
+				require.NoError(t, err)
+				assert.Equal(t, testCase.expectedTimestamps[i], event.Timestamp)
+			}
+		})
+	}
+}
diff --git a/pkg/signatures/engine/engine.go b/pkg/signatures/engine/engine.go
index 6fd8a80aa3ff..523abd895def 100644
--- a/pkg/signatures/engine/engine.go
+++ b/pkg/signatures/engine/engine.go
@@ -129,6 +129,7 @@ func (engine *Engine) Start(ctx context.Context) {
 	}
 	engine.signaturesMutex.RUnlock()
 	engine.consumeSources(ctx)
+	close(engine.output)
 }
 
 func (engine *Engine) unloadAllSignatures() {
@@ -232,7 +233,6 @@ func (engine *Engine) consumeSources(ctx context.Context) {
 				engine.signaturesMutex.RUnlock()
 				engine.inputs.Tracee = nil
 				if engine.checkCompletion() {
-					close(engine.output)
 					return
 				}
diff --git a/pkg/utils/files.go b/pkg/utils/files.go
index 455b64e1bbfe..b307b009b18d 100644
--- a/pkg/utils/files.go
+++ b/pkg/utils/files.go
@@ -23,11 +23,11 @@ func OpenExistingDir(p string) (*os.File, error) {
 
 // OpenAt is a wrapper function to the `openat` syscall using golang types.
 func OpenAt(dir *os.File, relativePath string, flags int, perm fs.FileMode) (*os.File, error) {
-	pidFileFD, err := unix.Openat(int(dir.Fd()), relativePath, flags, uint32(perm))
+	fd, err := unix.Openat(int(dir.Fd()), relativePath, flags, uint32(perm))
 	if err != nil {
 		return nil, errfmt.WrapError(err)
 	}
-	return os.NewFile(uintptr(pidFileFD), path.Join(dir.Name(), relativePath)), nil
+	return os.NewFile(uintptr(fd), path.Join(dir.Name(), relativePath)), nil
 }
 
 // RemoveAt is a wrapper function to the `unlinkat` syscall using golang types.
diff --git a/tests/e2e-analyze-inst-test.sh b/tests/e2e-analyze-inst-test.sh
new file mode 100755
index 000000000000..9fa31a6f5342
--- /dev/null
+++ b/tests/e2e-analyze-inst-test.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+#
+# This test is executed by github workflows inside the action runners
+#
+
+SCRIPT_TMP_DIR=/tmp/analyze_test
+TRACEE_TMP_DIR=/tmp/tracee
+
+SCRIPT_PATH="$(readlink -f "$0")"
+SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
+TESTS_DIR="$SCRIPT_DIR/e2e-inst-signatures/scripts"
+SIG_DIR="$SCRIPT_DIR/../dist/e2e-inst-signatures"
+SIG_SOURCE_DIR="$SCRIPT_DIR/e2e-inst-signatures/"
+
+source $SCRIPT_DIR/inst_tests_funcs.sh
+
+if [[ $UID -ne 0 ]]; then
+    error_exit "need root privileges"
+fi
+
+# Default test to run if no other is given
+TESTS=${INSTTESTS:=VFS_WRITE}
+
+# Tests to exclude from running
+EXCLUDE_TESTS="PROCTREE_DATA_SOURCE CONTAINERS_DATA_SOURCE WRITABLE_DATA_SOURCE DNS_DATA_SOURCE"
+
+# Remove excluded tests from TESTS variable
+for exclude_test in $EXCLUDE_TESTS; do
+    TESTS=${TESTS//$exclude_test/}
+done
+
+# Remove any leading or trailing whitespace
+TESTS=$(echo "$TESTS" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
+
+# The export.go backup (and tracee output files later) live under
+# SCRIPT_TMP_DIR, which does not exist yet on a fresh runner.
+mkdir -p "$SCRIPT_TMP_DIR"
+backup_export "$SIG_SOURCE_DIR"
+# Collect the signature files of the excluded tests so the build skips them
+EXCLUDED_FILES=""
+for exclude_test in $EXCLUDE_TESTS; do
+    signature_file=$(find_signature_file "$SIG_SOURCE_DIR" "$exclude_test")
+    if [[ -n $signature_file ]]; then
+        EXCLUDED_FILES+=" $(basename $signature_file)"
+        remove_sig_from_export "$signature_file" "$SIG_SOURCE_DIR"
+    fi
+done
+EXCLUDED_FILES=$(echo "$EXCLUDED_FILES" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
+
+info "Tests to run - $TESTS"
+
+. /etc/os-release
+
+if [[ ! -d ./signatures ]]; then
+    restore_export "$SIG_SOURCE_DIR"
+    error_exit "need to be in tracee root directory"
+fi
+
+rm -rf ${TRACEE_TMP_DIR:?}/* || error_exit "could not delete $TRACEE_TMP_DIR"
+
+KERNEL=$(uname -r)
+KERNEL_MAJ=$(echo "$KERNEL" | cut -d'.' -f1)
+
+if [[ $KERNEL_MAJ -lt 5 && "$KERNEL" != *"el8"* ]]; then
+    restore_export "$SIG_SOURCE_DIR"
+    info_exit "skip test in kernels < 5.0 (and not RHEL)"
+fi
+
+git config --global --add safe.directory "*"
+
+print_environment
+compile_tracee "E2E_INST_FILES_TO_EXCLUDE=\"$EXCLUDED_FILES\""
+
+restore_export "$SIG_SOURCE_DIR"
+
+anyerror=""
+
+# Analyze tests
+
+cleanup
+
+for TEST in $TESTS; do
+
+    info
+    info "= TEST: $TEST =============================================="
+    info
+
+    if ! special_tests_setup "$TEST"; then
+        continue
+    fi
+
+    if ! signature_file=$(find_signature_file "$SIG_SOURCE_DIR" "$TEST"); then
+        # Nothing to run without a signature: record the failure and move on.
+        info "No signature file found for $TEST - $signature_file"
+        anyerror="${anyerror}$TEST,"
+        continue
+    fi
+    events=$(extract_events_from_signature_file "$signature_file")",analyze_essentials"
+
+    info "Events to capture - $events"
+
+    # cleanup at the end of the previous iteration removed SCRIPT_TMP_DIR
+    mkdir -p "$SCRIPT_TMP_DIR"
+
+    # Run tracee to capture events
+    capture_events_file="$SCRIPT_TMP_DIR/capture-events-$$"
+    capture_log_file="$SCRIPT_TMP_DIR/capture-log-$$"
+    run_tracee "$events" "$capture_events_file" "$capture_log_file" "$SIG_DIR" "--output option:disable-parse-arguments"
+
+    # Wait for tracee to start
+    if ! wait_for_tracee "$capture_log_file"; then
+        anyerror="${anyerror}$TEST,"
+        continue
+    fi
+
+    run_test "$TEST"
+    # Sleep so events can finish processing
+    sleep 3
+    kill_tracee
+
+    if ! check_test "$TEST""_CAPTURE_EVENTS" "$capture_log_file" ""; then
+        anyerror="${anyerror}$TEST,"
+        cleanup
+        continue
+    fi
+
+    cp "$capture_events_file" "/tmp/$TEST-events.json"
+    cp "$capture_log_file" "/tmp/$TEST-logs"
+
+    info "ANALYZING EVENTS"
+
+    # Run tracee with signatures on captured events
+    analyze_events_file="$SCRIPT_TMP_DIR/analyze-events-$$"
+    analyze_log_file="$SCRIPT_TMP_DIR/analyze-log-$$"
+    run_tracee "$TEST" "$analyze_events_file" "$analyze_log_file" "$SIG_DIR" "--input json:$capture_events_file"
+
+    # Sleep so events can finish processing
+    # TODO: make analyze mode work with the pid file
+    sleep 5
+    kill_tracee
+
+    if ! check_test "$TEST" "$capture_log_file $analyze_log_file" "$analyze_events_file"; then
+        anyerror="${anyerror}$TEST,"
+    fi
+    cleanup
+done
+
+# Print summary and exit with error if any test failed
+
+info
+if [[ $anyerror != "" ]]; then
+    info "ALL TESTS: FAILED: ${anyerror::-1}"
+    exit 1
+fi
+
+info "ALL TESTS: SUCCESS"
+
+exit 0
diff --git a/tests/e2e-inst-test.sh b/tests/e2e-inst-test.sh
index 6a81401a8abf..fdf47ba5b175 100755
--- a/tests/e2e-inst-test.sh
+++ b/tests/e2e-inst-test.sh
@@ -4,38 +4,29 @@
 #
 # This test is executed by github workflows inside the action runners
 #
 
-ARCH=$(uname -m)
-
-TRACEE_STARTUP_TIMEOUT=30
-TRACEE_SHUTDOWN_TIMEOUT=30
-TRACEE_RUN_TIMEOUT=60
-SCRIPT_TMP_DIR=/tmp
+SCRIPT_TMP_DIR=/tmp/analyze_test
 TRACEE_TMP_DIR=/tmp/tracee
 
-# Default test to run if no other is given
-TESTS=${INSTTESTS:=VFS_WRITE}
-
-info_exit() {
-    echo -n "INFO: "
-    echo "$@"
-    exit 0
-}
-
-info() {
-    echo -n "INFO: "
-    echo "$@"
-}
+SCRIPT_PATH="$(readlink -f "$0")"
+SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
+TESTS_DIR="$SCRIPT_DIR/e2e-inst-signatures/scripts"
+SIG_DIR="$SCRIPT_DIR/../dist/e2e-inst-signatures"
+SIG_SOURCE_DIR="$SCRIPT_DIR/e2e-inst-signatures/"
-error_exit() { - echo -n "ERROR: " - echo "$@" - exit 1 -} +source $SCRIPT_DIR/inst_tests_funcs.sh if [[ $UID -ne 0 ]]; then error_exit "need root privileges" fi +# Default test to run if no other is given +TESTS=${INSTTESTS:=VFS_WRITE} + +# Remove any leading or trailing whitespace +TESTS=$(echo "$TESTS" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//') + +info "Tests to run - $TESTS" + . /etc/os-release if [[ ! -d ./signatures ]]; then @@ -51,36 +42,14 @@ if [[ $KERNEL_MAJ -lt 5 && "$KERNEL" != *"el8"* ]]; then info_exit "skip test in kernels < 5.0 (and not RHEL)" fi -SCRIPT_PATH="$(readlink -f "$0")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -TESTS_DIR="$SCRIPT_DIR/e2e-inst-signatures/scripts" -SIG_DIR="$SCRIPT_DIR/../dist/e2e-inst-signatures" - git config --global --add safe.directory "*" -info -info "= ENVIRONMENT =================================================" -info -info "KERNEL: ${KERNEL}" -info "CLANG: $(clang --version)" -info "GO: $(go version)" -info -info "= COMPILING TRACEE ============================================" -info -# make clean # if you want to be extra cautious -set -e -make -j"$(nproc)" all -make e2e-inst-signatures -set +e - -# Check if tracee was built correctly - -if [[ ! -x ./dist/tracee ]]; then - error_exit "could not find tracee executable" -fi +print_environment +compile_tracee "E2E_INST_FILES_TO_EXCLUDE=\"$EXCLUDED_FILES\"" anyerror="" + # Run tests, one by one for TEST in $TESTS; do @@ -89,143 +58,33 @@ for TEST in $TESTS; do info "= TEST: $TEST ==============================================" info - # Some tests might need special setup (like running before tracee) - - case $TEST in - HOOKED_SYSCALL) - if [[ ! 
-d /lib/modules/${KERNEL}/build ]]; then - info "skip hooked_syscall test, no kernel headers" - continue - fi - if [[ "$KERNEL" == *"amzn"* ]]; then - info "skip hooked_syscall test in amazon linux" - continue - fi - if [[ $ARCH == "aarch64" ]]; then - info "skip hooked_syscall test in aarch64" - continue - fi - if [[ "$VERSION_CODENAME" == "mantic" ]]; then - # https://github.com/aquasecurity/tracee/issues/3628 - info "skip hooked_syscall in mantic 6.5 kernel, broken" - continue - fi - "${TESTS_DIR}"/hooked_syscall.sh - ;; - esac - - # Run tracee - - rm -f $SCRIPT_TMP_DIR/build-$$ - rm -f $SCRIPT_TMP_DIR/tracee-log-$$ - - ./dist/tracee \ - --install-path $TRACEE_TMP_DIR \ - --cache cache-type=mem \ - --cache mem-cache-size=512 \ - --proctree source=both \ - --output option:sort-events \ - --output json:$SCRIPT_TMP_DIR/build-$$ \ - --output option:parse-arguments \ - --log file:$SCRIPT_TMP_DIR/tracee-log-$$ \ - --signatures-dir "$SIG_DIR" \ - --scope comm=echo,mv,ls,tracee,proctreetester,ping,ds_writer,fsnotify_tester,process_execute,tracee-ebpf,writev,set_fs_pwd.sh \ - --dnscache enable \ - --grpc-listen-addr unix:/tmp/tracee.sock \ - --events "$TEST" & - - # Wait tracee to start - - times=0 - timedout=0 - while true; do - times=$((times + 1)) - sleep 1 - if [[ -f $TRACEE_TMP_DIR/tracee.pid ]]; then - info - info "UP AND RUNNING" - info - break - fi - - if [[ $times -gt $TRACEE_STARTUP_TIMEOUT ]]; then - timedout=1 - break - fi - done - - # Tracee failed to start - - if [[ $timedout -eq 1 ]]; then - info - info "$TEST: FAILED. ERRORS:" - info - cat $SCRIPT_TMP_DIR/tracee-log-$$ - - anyerror="${anyerror}$TEST," - continue - fi - - # Allow tracee to start processing events + special_tests_setup "$TEST" + skip_test=$? 
+ if [[ $skip_test -eq 1 ]]; then + continue + fi - sleep 3 - - # Run tests + # Run tracee + events_file="$SCRIPT_TMP_DIR/build-$$" + log_file="$SCRIPT_TMP_DIR/tracee-log-$$" + run_tracee "$TEST" "$events_file" "$log_file" "$SIG_DIR" + + # Wait for tracee to start + if ! wait_for_tracee "$log_file"; then + anyerror="${anyerror}$TEST," + continue + fi - case $TEST in - HOOKED_SYSCALL) - # wait for tracee hooked event to be processed - sleep 15 - ;; - *) - timeout --preserve-status $TRACEE_RUN_TIMEOUT "${TESTS_DIR}"/"${TEST,,}".sh - ;; - esac + run_test "$TEST" - # So events can finish processing + # Sleep so events can finish processing sleep 3 - - # The cleanup happens at EXIT - - logfile=$SCRIPT_TMP_DIR/tracee-log-$$ - - # Check if the test has failed or not - - found=0 - cat $SCRIPT_TMP_DIR/build-$$ | jq .eventName | grep -q "$TEST" && found=1 - errors=$(cat $logfile | wc -l 2>/dev/null) - - if [[ $TEST == "BPF_ATTACH" ]]; then - errors=0 - fi - - info - if [[ $found -eq 1 && $errors -eq 0 ]]; then - info "$TEST: SUCCESS" - else - anyerror="${anyerror}$TEST," - info "$TEST: FAILED, stderr from tracee:" - cat $SCRIPT_TMP_DIR/tracee-log-$$ - info "$TEST: FAILED, events from tracee:" - cat $SCRIPT_TMP_DIR/build-$$ - info - fi - info - - rm -f $SCRIPT_TMP_DIR/build-$$ - rm -f $SCRIPT_TMP_DIR/tracee-log-$$ - - # Make sure we exit tracee to start it again - - pid_tracee=$(pidof tracee | cut -d' ' -f1) - kill -SIGINT "$pid_tracee" - sleep $TRACEE_SHUTDOWN_TIMEOUT - kill -SIGKILL "$pid_tracee" >/dev/null 2>&1 - sleep 3 - - # Cleanup leftovers - rm -rf $TRACEE_TMP_DIR + kill_tracee + if ! 
check_test "$TEST" "$log_file" "$events_file"; then
+        anyerror="${anyerror}$TEST,"
+    fi
+    cleanup
 done
 
 # Print summary and exit with error if any test failed
diff --git a/tests/inst_tests_funcs.sh b/tests/inst_tests_funcs.sh
new file mode 100644
index 000000000000..899d904c1a77
--- /dev/null
+++ b/tests/inst_tests_funcs.sh
@@ -0,0 +1,346 @@
+# Shared helpers for the e2e instrumentation test scripts.
+# NOTE: expects SCRIPT_DIR to be set by the sourcing script.
+
+ARCH=$(uname -m)
+
+TRACEE_STARTUP_TIMEOUT=30
+TRACEE_SHUTDOWN_TIMEOUT=30
+TRACEE_RUN_TIMEOUT=60
+SCRIPT_TMP_DIR=/tmp/analyze_test
+TRACEE_TMP_DIR=/tmp/tracee
+
+TESTS_DIR="$SCRIPT_DIR/e2e-inst-signatures/scripts"
+
+KERNEL=$(uname -r)
+
+info_exit() {
+    echo -n "INFO: "
+    echo "$@"
+    exit 0
+}
+
+info() {
+    echo -n "INFO: "
+    echo "$@"
+}
+
+error_exit() {
+    echo -n "ERROR: "
+    echo "$@"
+    exit 1
+}
+
+print_environment() {
+    info
+    info "= ENVIRONMENT ================================================="
+    info
+    info "KERNEL: ${KERNEL}"
+    info "CLANG: $(clang --version)"
+    info "GO: $(go version)"
+}
+
+compile_tracee() {
+    info
+    info "= COMPILING TRACEE ============================================"
+    info
+    # make clean # if you want to be extra cautious
+    set -e
+    make -j"$(nproc)" all
+    make e2e-inst-signatures "$@"
+    set +e
+
+    # Check if tracee was built correctly
+
+    if [[ ! -x ./dist/tracee ]]; then
+        error_exit "could not find tracee executable"
+    fi
+}
+
+# Function: run_tracee
+# Description:
+#     Runs the tracee program with the specified events and flags.
+#     Assumes that runs from Tracee's root directory.
+# Parameters:
+#     - events: The events to trace.
+#     - output_file: The output file to save the tracee events to.
+#     - logfile: The logfile to save the tracee logs to.
+#     - sig_dir: The directory to load signatures from.
+#     - flags: Additional flags to pass to the tracee program.
+# Returns: None
+run_tracee() {
+    local events=$1
+    local output_file=$2
+    local logfile=$3
+    local sig_dir=$4
+    local flags=$5
+
+    # Make sure the destination directory exists (cleanup may have removed it)
+    mkdir -p "$(dirname "$output_file")" "$(dirname "$logfile")"
+    rm -f $output_file
+    rm -f $logfile
+    ./dist/tracee \
+        --install-path $TRACEE_TMP_DIR \
+        --cache cache-type=mem \
+        --cache mem-cache-size=512 \
+        --proctree source=both \
+        --output option:sort-events \
+        --output json:$output_file \
+        --log file:$logfile \
+        --log debug \
+        --signatures-dir "$sig_dir" \
+        --scope comm=echo,mv,ls,tracee,proctreetester,ping,ds_writer,fsnotify_tester,process_execute,tracee-ebpf,writev,set_fs_pwd.sh \
+        --dnscache enable \
+        --grpc-listen-addr unix:/tmp/tracee.sock \
+        --events "$events" \
+        $flags &
+}
+
+# Function: wait_for_tracee
+#
+# Description:
+#     This function waits for the Tracee process to start by checking the existence of the tracee.pid file in the TRACEE_TMP_DIR.
+#     It waits for a maximum of TRACEE_STARTUP_TIMEOUT seconds for the Tracee process to start.
+#     If the Tracee process starts within the timeout period, it prints a success message and returns.
+#     If the Tracee process fails to start within the timeout period, it prints an error message and returns with a non-zero exit code.
+#
+# Parameters:
+#     - logfile: The path to the log file where the error messages will be written.
+#
+# Returns:
+#     0 if the Tracee process starts successfully
+#     1 if the Tracee process fails to start within the timeout period
+wait_for_tracee() {
+    local logfile=$1
+    times=0
+    timedout=0
+    while true; do
+        times=$((times + 1))
+        sleep 1
+        if [[ -f $TRACEE_TMP_DIR/tracee.pid ]]; then
+            info
+            info "UP AND RUNNING"
+            info
+            break
+        fi
+
+        if [[ $times -gt $TRACEE_STARTUP_TIMEOUT ]]; then
+            timedout=1
+            break
+        fi
+    done
+
+    # Tracee failed to start
+    if [[ $timedout -eq 1 ]]; then
+        info
+        info "$TEST: FAILED. ERRORS:"
+        info
+        cat $logfile
+
+        return 1
+    fi
+
+    # Allow tracee to start processing events
+    sleep 3
+    return 0
+}
+
+# Function: check_test
+# Description: Checks if a test has failed or not. If the test has failed, it prints the stderr from tracee.
+# Parameters:
+#     - test: The name of the test to check.
+#     - logfiles_raw: The logfiles to check, separated by spaces.
+#     - events_file: The file containing the events to check.
+# Returns: 0 if the test is successful, 1 if the test fails
+check_test() {
+    local test=$1
+    local logfiles_raw=$2
+    local events_file=$3
+
+    # Split the string into an array
+    IFS=' ' read -r -a logfiles <<< "$logfiles_raw"
+
+    # Check if the test has failed or not
+    found=0
+    errors=0
+    if [[ -f $events_file ]]; then
+        cat $events_file | jq .eventName | grep -q "$test" && found=1
+    else
+        # No events file was requested - only the logs are checked
+        found=1
+    fi
+
+    for logfile in "${logfiles[@]}"; do
+        # A missing logfile must not break the arithmetic below
+        count=$(grep -cE "ERROR|FATAL" "$logfile" 2>/dev/null)
+        errors=$((errors + ${count:-0}))
+    done
+
+    if [[ $test == "BPF_ATTACH" ]]; then
+        errors=0
+    fi
+
+    info
+    if [[ $found -eq 1 && $errors -eq 0 ]]; then
+        info "$test: SUCCESS"
+    else
+        info "$test: FAILED"
+        if [[ $found -ne 1 ]]; then
+            info "No events found for $test"
+        fi
+        if [[ $errors -ne 0 ]]; then
+            info "Errors found in logfiles"
+        fi
+        info "logs from tracee:"
+        for logfile in "${logfiles[@]}"; do
+            info "LOGFILE $logfile:"
+            if [[ ! -s "$logfile" ]]; then
+                info "No log from logfile $logfile"
+            else
+                cat "$logfile"
+            fi
+        done
+        info
+        return 1
+    fi
+    info
+    return 0
+}
+
+# Function: kill_tracee
+# Description: Kills the tracee process to ensure it is exited and can be started again.
+# Parameters: None
+# Returns: None
+kill_tracee() {
+    pid_tracee=$(pidof tracee | cut -d' ' -f1)
+    if [[ -z "$pid_tracee" ]]; then
+        return
+    fi
+    kill -SIGINT "$pid_tracee"
+    sleep $TRACEE_SHUTDOWN_TIMEOUT
+    kill -SIGKILL "$pid_tracee" >/dev/null 2>&1
+    sleep 3
+}
+
+# Function: cleanup
+# Description: Cleans up any leftovers from the test.
+# Parameters: None
+# Returns: None
+cleanup() {
+    rm -rf $SCRIPT_TMP_DIR
+    rm -rf $TRACEE_TMP_DIR
+}
+
+# Some tests might need special setup (like running before tracee)
+special_tests_setup() {
+    local test=$1
+    local skip=0
+    case $test in
+    HOOKED_SYSCALL)
+        if [[ ! -d /lib/modules/${KERNEL}/build ]]; then
+            info "skip hooked_syscall test, no kernel headers"
+            skip=1
+        elif [[ "$KERNEL" == *"amzn"* ]]; then
+            info "skip hooked_syscall test in amazon linux"
+            skip=1
+        elif [[ $ARCH == "aarch64" ]]; then
+            info "skip hooked_syscall test in aarch64"
+            skip=1
+        elif [[ "$VERSION_CODENAME" == "mantic" ]]; then
+            # https://github.com/aquasecurity/tracee/issues/3628
+            info "skip hooked_syscall in mantic 6.5 kernel, broken"
+            skip=1
+        else
+            "${TESTS_DIR}"/hooked_syscall.sh
+        fi
+        ;;
+    esac
+    return $skip
+}
+
+# Function to run a specific test.
+# Parameters:
+#     - test: The name of the test to run.
+# Returns: None.
+run_test() {
+    local test=$1
+
+    # Check the type of test.
+    case $test in
+    HOOKED_SYSCALL)
+        # If the test is HOOKED_SYSCALL, wait for the tracee hooked event to be processed.
+        sleep 15
+        ;;
+    *)
+        # For other tests, run the corresponding script with a timeout.
+        timeout --preserve-status $TRACEE_RUN_TIMEOUT "${TESTS_DIR}"/"${test,,}".sh
+        ;;
+    esac
+}
+
+# Extract the tracee event names a signature selects, as a comma-separated list.
+extract_events_from_signature_file() {
+    local file="$1"
+    local matches
+
+    # Use grep to find all matches and print the match group
+    matches=$(grep -oP 'Source:\s+"tracee",\s+Name:\s+"(.*?)"' "$file" | awk -F 'Name: "' '{print $2}' | awk -F '"' '{print $1}')
+
+    # Convert matches to a comma-separated list
+    local slice=""
+    for match in $matches; do
+        slice+="$match,"
+    done
+
+    # Remove the trailing comma
+    slice=${slice%,}
+
+    # Print the resulting list
+    echo "$slice"
+}
+
+find_signature_file() {
+    local directory="$1"
+    local signature_name="$2"
+    local file
+
+    # Search for the string in files within the directory.
+    # grep -rl can match several files: keep only the first, as documented.
+    file=$(grep -rl "\"$signature_name\"" "$directory" | head -n 1)
+
+    # Check if a file was found
+    if [ -n "$file" ]; then
+        # Return the path of the first file found
+        echo "$file"
+    else
+        # If no file was found, print an error message
+        echo "No file contains the string: $signature_name"
+        return 1
+    fi
+}
+
+remove_sig_from_export() {
+    local signature_file="$1"
+    local signature_dir="$2"
+
+    # Get the name of the type created in the Go file
+    type_name=$(grep -oP '(?<=type )e2e\w+' "$signature_file" | head -n 1)
+
+    # Remove the line that initializes the type in export.go
+    sed -i "/&$type_name{}/d" "$signature_dir/export.go"
+}
+
+backup_export() {
+    local signature_dir="$1"
+    # Backup the export file (the backup directory may not exist yet)
+    mkdir -p "$SCRIPT_TMP_DIR"
+    cp "$signature_dir/export.go" "$SCRIPT_TMP_DIR/export.go.bak"
+}
+
+restore_export() {
+    local signature_dir="$1"
+    # Restore the export file from backup if it exists
+    if [ -f "$SCRIPT_TMP_DIR/export.go.bak" ]; then
+        cp "$SCRIPT_TMP_DIR/export.go.bak" "$signature_dir/export.go"
+    else
+        info "No backup export file found"
+    fi
+}