From 539eaeaaa761f833a82a018706bc387b9b8770ad Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 17 Oct 2023 19:15:59 +0200 Subject: [PATCH 01/40] add support for multiple log targets to LogWatch --- logwatch/logwatch.go | 218 ++++++++++++++++++++++------ logwatch/logwatch_handlers.go | 173 ++++++++++++++++++++++ logwatch/logwatch_helpers.go | 40 +++++ logwatch/logwatch_test.go | 6 +- logwatch/logwatch_user_loki_test.go | 4 +- logwatch/logwatch_user_test.go | 13 +- 6 files changed, 401 insertions(+), 53 deletions(-) create mode 100644 logwatch/logwatch_handlers.go create mode 100644 logwatch/logwatch_helpers.go diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 6cd0d580d..4df6afc5f 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -2,12 +2,12 @@ package logwatch import ( "context" + "os" "regexp" "strings" "testing" - "time" - "github.com/prometheus/common/model" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/smartcontractkit/wasp" "github.com/testcontainers/testcontainers-go" @@ -15,6 +15,8 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" ) +const NO_TEST = "no_test" + // LogNotification notification about log line match for some container type LogNotification struct { Container string @@ -25,30 +27,101 @@ type LogNotification struct { // LogWatch is a test helper struct to monitor docker container logs for some patterns // and push their logs into Loki for further analysis type LogWatch struct { - t *testing.T - log zerolog.Logger - loki *wasp.LokiClient - patterns map[string][]*regexp.Regexp - notifyTest chan *LogNotification - containers []testcontainers.Container - consumers map[string]*ContainerLogConsumer -} - -// NewLogWatch creates a new LogWatch instance, with a Loki client -func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp) (*LogWatch, error) { - loki, err := wasp.NewLokiClient(wasp.NewEnvLokiConfig()) - if err != nil { + testName string + log zerolog.Logger + loki *wasp.LokiClient + patterns map[string][]*regexp.Regexp + notifyTest chan *LogNotification + containers []testcontainers.Container + consumers map[string]*ContainerLogConsumer + logTargetHandlers map[LogTarget]HandleLogTarget +} + +type LogContent struct { + TestName string + ContainerName string + Content []byte +} + +type LogWatchOption func(*LogWatch) + +// NewLogWatch creates a new LogWatch instance, with Loki client only if Loki log target is enabled (lazy init) +func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ...LogWatchOption) (*LogWatch, error) { + l := logging.GetLogger(nil, "LOGWATCH_LOG_LEVEL").With().Str("Component", "LogWatch").Logger() + var testName string + if t == nil { + testName = NO_TEST + } else { + testName = t.Name() + } + + logWatch := &LogWatch{ + testName: testName, + log: l, + patterns: patterns, + notifyTest: make(chan *LogNotification, 10000), + consumers: make(map[string]*ContainerLogConsumer, 0), + logTargetHandlers: getDefaultLogHandlers(), + } + + for _, option := range options { + option(logWatch) + } + + if err := logWatch.validateLogTargets(); err != nil { return nil, err } - l := logging.GetLogger(t, "LOGWATCH_LOG_LEVEL").With().Str("Component", "LogWatch").Logger() - return &LogWatch{ - t: t, - log: l, - loki: loki, - patterns: patterns, - notifyTest: make(chan *LogNotification, 10000), - consumers: make(map[string]*ContainerLogConsumer, 0), - }, nil + + return logWatch, nil +} + +func (l *LogWatch) validateLogTargets() error { + envLogTargets, err := 
getLogTargetsFromEnv() + if err != nil { + return err + } + + // check if all requested log targets are supported + for _, wantedTarget := range envLogTargets { + found := false + for knownTargets := range l.logTargetHandlers { + if knownTargets == wantedTarget { + found = true + break + } + } + + if !found { + return errors.Errorf("no handler found for log target: %d", wantedTarget) + } + } + + // deactivate known log targets that are not enabled + for knownTarget := range l.logTargetHandlers { + wanted := false + for _, wantedTarget := range envLogTargets { + if knownTarget == wantedTarget { + wanted = true + break + } + } + if !wanted { + l.log.Debug().Int("handler id", int(knownTarget)).Msg("Log target disabled") + delete(l.logTargetHandlers, knownTarget) + } + } + + if len(l.logTargetHandlers) == 0 { + l.log.Warn().Msg("No log targets enabled. LogWatch will not do anything") + } + + return nil +} + +func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) LogWatchOption { + return func(lw *LogWatch) { + lw.logTargetHandlers[logTarget] = handler + } } // Listen listen for the next notification @@ -76,19 +149,26 @@ func (m *LogWatch) OnMatch(f func(ln *LogNotification)) { } // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer -func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string, pushToLoki bool) error { +func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string) error { name, err := container.Name(ctx) if err != nil { return err } name = strings.Replace(name, "/", "", 1) prefix = strings.Replace(prefix, "/", "", 1) + + enabledLogTargets := make([]LogTarget, 0) + for logTarget := range m.logTargetHandlers { + enabledLogTargets = append(enabledLogTargets, logTarget) + } + var cons *ContainerLogConsumer if prefix != "" { - cons = newContainerLogConsumer(m, name, prefix, pushToLoki) + cons = newContainerLogConsumer(m, name, prefix, enabledLogTargets...) } else { - cons = newContainerLogConsumer(m, name, name, pushToLoki) + cons = newContainerLogConsumer(m, name, name, enabledLogTargets...) } + m.log.Info(). Str("Prefix", prefix). Str("Name", name). 
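// For illustration only (reviewer sketch, not part of this patch): with the
// changes above, a test selects targets via the LOGWATCH_LOG_TARGETS env var
// ("loki", "file", or a comma-separated list) and can swap in its own handler
// through the new functional option. `myFileHandler` below is a hypothetical
// type implementing HandleLogTarget.
//
//	os.Setenv("LOGWATCH_LOG_TARGETS", "loki,file")
//	lw, err := logwatch.NewLogWatch(
//		t,
//		nil, // no regexp patterns to notify on
//		logwatch.WithCustomLogHandler(logwatch.File, &myFileHandler{}),
//	)
//	require.NoError(t, err)
//	require.NoError(t, lw.ConnectContainer(context.Background(), container, ""))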
@@ -101,18 +181,35 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainer // Shutdown disconnects all containers, stops notifications func (m *LogWatch) Shutdown() { - m.loki.Stop() + for _, c := range m.containers { + m.DisconnectContainer(c) + } + + if m.loki != nil { + m.loki.Stop() + } +} + +func (m *LogWatch) PrintLogTargetsLocations() { + for _, handler := range m.logTargetHandlers { + handler.PrintLogLocation(m) + } } // DisconnectContainer disconnects the particular container func (m *LogWatch) DisconnectContainer(container testcontainers.Container) { if container.IsRunning() { + m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container") _ = container.StopLogProducer() } } // ContainerLogs return all logs for the particular container func (m *LogWatch) ContainerLogs(name string) []string { + if _, ok := m.consumers[name]; !ok { + return []string{} + } + return m.consumers[name].Messages } @@ -141,19 +238,19 @@ func (m *LogWatch) PrintAll() { type ContainerLogConsumer struct { name string prefix string - pushToLoki bool + logTargets []LogTarget lw *LogWatch Messages []string } // newContainerLogConsumer creates new log consumer for a container that // - signal if log line matches the pattern -// - push all lines to Loki if enabled -func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, pushToLoki bool) *ContainerLogConsumer { +// - push all lines to configured log targets +func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, logTargets ...LogTarget) *ContainerLogConsumer { return &ContainerLogConsumer{ name: containerName, prefix: prefix, - pushToLoki: pushToLoki, + logTargets: logTargets, lw: lw, Messages: make([]string, 0), } @@ -166,19 +263,21 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { for i := 0; i < matches; i++ { g.lw.notifyTest <- &LogNotification{Container: g.name, Prefix: g.prefix, Log: string(l.Content)} } - var testName string - if g.lw.t == nil { - testName = "no_test" - } else { - testName = g.lw.t.Name() + + content := LogContent{ + TestName: g.lw.testName, + ContainerName: g.name, + Content: l.Content, } - // we can notify more than one time if it matches, but we push only once - if g.pushToLoki && g.lw.loki != nil { - _ = g.lw.loki.Handle(model.LabelSet{ - "type": "log_watch", - "test": model.LabelValue(testName), - "container": model.LabelValue(g.name), - }, time.Now(), string(l.Content)) + + for _, logTarget := range g.logTargets { + if handler, ok := g.lw.logTargetHandlers[logTarget]; ok { + if err := handler.Handle(g, content); err != nil { + g.lw.log.Error().Err(err).Msg("Failed to handle log target") + } + } else { + g.lw.log.Warn().Int("handler id", int(logTarget)).Msg("No handler found for log target") + } } } @@ -201,3 +300,34 @@ func (g *ContainerLogConsumer) FindMatch(l testcontainers.Log) int { } return matchesPerPattern } + +func (g *ContainerLogConsumer) hasLogTarget(logTarget LogTarget) bool { + for _, lt := range g.logTargets { + if lt&logTarget != 0 { + return true + } + } + + return false +} + +func getLogTargetsFromEnv() ([]LogTarget, error) { + envLogTargetsValue := os.Getenv("LOGWATCH_LOG_TARGETS") + if envLogTargetsValue != "" { + envLogTargets := make([]LogTarget, 0) + for _, target := range strings.Split(envLogTargetsValue, ",") { + switch strings.TrimSpace(strings.ToLower(target)) { + case "loki": + envLogTargets = append(envLogTargets, Loki) + case "file": + envLogTargets = append(envLogTargets, File) + 
default: + return []LogTarget{}, errors.Errorf("unknown log target: %s", target) + } + } + + return envLogTargets, nil + } + + return []LogTarget{}, nil +} diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go new file mode 100644 index 000000000..741b53514 --- /dev/null +++ b/logwatch/logwatch_handlers.go @@ -0,0 +1,173 @@ +package logwatch + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/smartcontractkit/wasp" +) + +type LogTarget int + +const ( + Loki LogTarget = 1 << iota + File +) + +type HandleLogTarget interface { + Handle(*ContainerLogConsumer, LogContent) error + PrintLogLocation(*LogWatch) +} + +func getDefaultLogHandlers() map[LogTarget]HandleLogTarget { + handlers := make(map[LogTarget]HandleLogTarget) + handlers[Loki] = LokiLogHandler{ + shouldSkipLogging: make(map[string]bool), + } + handlers[File] = FileLogHandler{ + testLogFolders: make(map[string]string), + shouldSkipLogging: make(map[string]bool), + } + + return handlers +} + +// streams logs to local files +type FileLogHandler struct { + testLogFolders map[string]string + shouldSkipLogging map[string]bool +} + +func (h FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { + if val, ok := h.shouldSkipLogging[content.TestName]; val && ok { + return nil + } + + folder, err := h.getOrCreateLogFolder(content.TestName) + if err != nil { + h.shouldSkipLogging[content.TestName] = true + + return errors.Wrap(err, "failed to create logs folder. File logging stopped") + } + + logFileName := filepath.Join(folder, fmt.Sprintf("%s.log", content.ContainerName)) + logFile, err := os.OpenFile(logFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + h.shouldSkipLogging[content.TestName] = true + + return errors.Wrap(err, "failed to open log file. File logging stopped") + } + + defer logFile.Close() + + if _, err := logFile.WriteString(string(content.Content)); err != nil { + h.shouldSkipLogging[content.TestName] = true + + return errors.Wrap(err, "failed to write to log file. 
File logging stopped") + } + + return nil +} + +func (h FileLogHandler) PrintLogLocation(l *LogWatch) { + for testname, folder := range h.testLogFolders { + l.log.Info().Str("Test", testname).Str("Folder", folder).Msg("Logs saved to folder:") + } +} + +func (h FileLogHandler) getOrCreateLogFolder(testname string) (string, error) { + var folder string + if _, ok := h.testLogFolders[testname]; !ok { + folder = fmt.Sprintf("./logs/%s-%s", testname, time.Now().Format("2006-01-02T15-04-05")) + if err := os.MkdirAll(folder, os.ModePerm); err != nil { + return "", err + } + h.testLogFolders[testname] = folder + } + folder = h.testLogFolders[testname] + + return folder, nil +} + +// streams logs to Loki +type LokiLogHandler struct { + shouldSkipLogging map[string]bool +} + +func (h LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { + if val, ok := h.shouldSkipLogging[content.TestName]; val && ok { + c.lw.log.Warn().Str("Test", content.TestName).Msg("Skipping pushing logs to Loki for this test") + return nil + } + + if c.lw.loki == nil { + loki, err := wasp.NewLokiClient(wasp.NewEnvLokiConfig()) + if err != nil { + c.lw.log.Error().Err(err).Msg("Failed to create Loki client") + h.shouldSkipLogging[content.TestName] = true + + return err + } + c.lw.loki = loki + } + // we can notify more than one time if it matches, but we push only once + _ = c.lw.loki.Handle(model.LabelSet{ + "type": "log_watch", + "test": model.LabelValue(content.TestName), + "container": model.LabelValue(content.ContainerName), + }, time.Now(), string(content.Content)) + + return nil +} + +func (h LokiLogHandler) PrintLogLocation(l *LogWatch) { + queries := make([]GrafanaExploreQuery, 0) + + rangeFrom := time.Now() + rangeTo := time.Now() + + for _, c := range l.consumers { + if c.hasLogTarget(Loki) { + queries = append(queries, GrafanaExploreQuery{ + refId: c.name, + container: c.name, + }) + } + + // lets find the oldest log message to know when to start the range from + if len(c.Messages) > 0 { + var firstMsg struct { + Ts string `json:"ts"` + } + if err := json.Unmarshal([]byte(c.Messages[0]), &firstMsg); err != nil { + l.log.Error().Err(err).Str("container", c.name).Msg("Failed to unmarshal first log message") + } else { + firstTs, err := time.Parse(time.RFC3339, firstMsg.Ts) + if err != nil { + l.log.Error().Err(err).Str("container", c.name).Msg("Failed to parse first log message timestamp") + } else { + if firstTs.Before(rangeFrom) { + rangeFrom = firstTs + } + } + } + } + } + + grafanaUrl := GrafanaExploreUrl{ + baseurl: os.Getenv("GRAFANA_URL"), + datasource: os.Getenv("GRAFANA_DATASOURCE"), + queries: queries, + rangeFrom: rangeFrom.UnixMilli(), + rangeTo: rangeTo.UnixMilli() + 60000, //just to make sure we get the last message + }.getUrl() + + l.log.Info().Str("URL", string(grafanaUrl)).Msg("Loki logs can be found in Grafana at (will only work when you unescape quotes):") + + fmt.Printf("Loki logs can be found in Grafana at: %s\n", grafanaUrl) +} diff --git a/logwatch/logwatch_helpers.go b/logwatch/logwatch_helpers.go new file mode 100644 index 000000000..a1a3de4d8 --- /dev/null +++ b/logwatch/logwatch_helpers.go @@ -0,0 +1,40 @@ +package logwatch + +import ( + "fmt" + "strings" +) + +type GrafanaExploreUrl struct { + baseurl string + datasource string + queries []GrafanaExploreQuery + rangeFrom int64 + rangeTo int64 +} + +type GrafanaExploreQuery struct { + refId string + container string +} + +func (g GrafanaExploreUrl) getUrl() string { + url := g.baseurl + + if strings.HasSuffix(url, "/") 
&& len(url) > 0 {
+		url = url[:len(url)-1]
+	}
+
+	url += "/explore?panes="
+	url += "{\"_an\":{\"datasource\":\"" + g.datasource + "\",\"queries\":["
+	for i, query := range g.queries {
+		url += "{\"refId\":\"" + query.refId + "\",\"expr\":\"{container=\\\"" + query.container + "\\\"}\",\"queryType\":\"range\",\"datasource\":{\"type\":\"loki\",\"uid\":\"" + g.datasource + "\"},\"editorMode\":\"builder\",\"hide\":false}"
+		if i < len(g.queries)-1 {
+			url += ","
+		}
+	}
+
+	url += "],\"range\":{\"from\":\"" + fmt.Sprint(g.rangeFrom) + "\",\"to\":\"" + fmt.Sprint(g.rangeTo) + "\"}}}&schemaVersion=1&orgId=1"
+
+	return url
+}
diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go
index 896d74b73..7352d2dfc 100644
--- a/logwatch/logwatch_test.go
+++ b/logwatch/logwatch_test.go
@@ -3,6 +3,7 @@ package logwatch_test
 import (
 	"context"
 	"fmt"
+	"os"
 	"reflect"
 	"regexp"
 	"strconv"
@@ -26,7 +27,7 @@ type TestCase struct {
 	exitEarly             bool
 	mustNotifyList        map[string][]*regexp.Regexp
 	expectedNotifications map[string][]*logwatch.LogNotification
-	pushToLoki            bool
+	logTargets            []logwatch.LogTarget
 }
 
 func getNotificationsAmount(m map[string][]*regexp.Regexp) int {
@@ -95,6 +96,7 @@ func startTestContainer(containerName string, msg string, amount int, intervalSe
 }
 
 func TestLogWatchDocker(t *testing.T) {
+	os.Setenv("LOGWATCH_LOG_TARGETS", "loki")
 	tests := []TestCase{
 		{
 			name: "should read exactly 10 streams (1 container)",
@@ -168,7 +170,7 @@
 			require.NoError(t, err)
 			name, err := container.Name(context.Background())
 			require.NoError(t, err)
-			err = lw.ConnectContainer(context.Background(), container, name, tc.pushToLoki)
+			err = lw.ConnectContainer(context.Background(), container, name)
 			require.NoError(t, err)
 		}
 
diff --git a/logwatch/logwatch_user_loki_test.go b/logwatch/logwatch_user_loki_test.go
index b685b0f03..84b99cdbf 100644
--- a/logwatch/logwatch_user_loki_test.go
+++ b/logwatch/logwatch_user_loki_test.go
@@ -1,6 +1,7 @@
 package logwatch_test
 
 import (
+	"os"
 	"testing"
 	"time"
 
@@ -13,6 +14,7 @@ import (
 
 func TestExampleLokiStreaming(t *testing.T) {
 	t.Skip("uncomment and run manually")
+	os.Setenv("LOGWATCH_LOG_TARGETS", "loki")
 	tests := []testData{
 		{
 			name:      "stream all container logs to Loki, subtest 1",
@@ -43,7 +45,7 @@
 			require.NoError(t, err)
 			lw, err := logwatch.NewLogWatch(t, nil)
 			require.NoError(t, err)
-			err = d.ConnectLogs(lw, true)
+			err = d.ConnectLogs(lw)
 			require.NoError(t, err)
 			time.Sleep(5 * time.Second)
 		})
diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go
index d21eb4f47..cbd89550b 100644
--- a/logwatch/logwatch_user_test.go
+++ b/logwatch/logwatch_user_test.go
@@ -3,6 +3,7 @@ package logwatch_test
 import (
 	"context"
 	"fmt"
+	"os"
 	"regexp"
 	"testing"
 	"time"
@@ -51,11 +52,10 @@ func (m *MyDeployment) Shutdown() error {
 	return nil
 }
 
-/* That's what you need to implement to have your logs in Loki */
-
-func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch, pushToLoki bool) error {
+/* That's what you need to implement to have your logs sent to your chosen targets */
+func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch) error {
 	for _, c := range m.containers {
-		if err := lw.ConnectContainer(context.Background(), c, "", pushToLoki); err != nil {
+		if err := lw.ConnectContainer(context.Background(), c, ""); err != nil {
 			return err
 		}
 	}
@@ -65,6 +65,7 @@ func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch, pushToLoki bool) error
 
 /* That's how you use it */
 func 
TestExampleUserInteraction(t *testing.T) { + os.Setenv("LOGWATCH_LOG_TARGETS", "loki") t.Run("sync API, block, receive one message", func(t *testing.T) { testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} d, err := NewDeployment(testData) @@ -80,7 +81,7 @@ func TestExampleUserInteraction(t *testing.T) { }, ) require.NoError(t, err) - err = d.ConnectLogs(lw, false) + err = d.ConnectLogs(lw) require.NoError(t, err) match := lw.Listen() require.NotEmpty(t, match) @@ -105,7 +106,7 @@ func TestExampleUserInteraction(t *testing.T) { ) require.NoError(t, err) lw.OnMatch(func(ln *logwatch.LogNotification) { notifications++ }) - err = d.ConnectLogs(lw, false) + err = d.ConnectLogs(lw) require.NoError(t, err) time.Sleep(1 * time.Second) require.Equal(t, testData.repeat*len(testData.streams), notifications) From 2e9e09a8464f3c341f26315236940d0f3d1d93b0 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 19 Oct 2023 15:57:44 +0200 Subject: [PATCH 02/40] fix linting errors --- logwatch/logwatch.go | 32 ++++++++++++++++---------------- logwatch/logwatch_test.go | 1 - 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 4df6afc5f..ba8353f2f 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -43,10 +43,10 @@ type LogContent struct { Content []byte } -type LogWatchOption func(*LogWatch) +type Option func(*LogWatch) // NewLogWatch creates a new LogWatch instance, with Loki client only if Loki log target is enabled (lazy init) -func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ...LogWatchOption) (*LogWatch, error) { +func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ...Option) (*LogWatch, error) { l := logging.GetLogger(nil, "LOGWATCH_LOG_LEVEL").With().Str("Component", "LogWatch").Logger() var testName string if t == nil { @@ -118,16 +118,16 @@ func (l *LogWatch) validateLogTargets() error { return nil } -func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) LogWatchOption { +func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) Option { return func(lw *LogWatch) { lw.logTargetHandlers[logTarget] = handler } } // Listen listen for the next notification -func (m *LogWatch) Listen() *LogNotification { - msg := <-m.notifyTest - m.log.Warn(). +func (l *LogWatch) Listen() *LogNotification { + msg := <-l.notifyTest + l.log.Warn(). Str("Container", msg.Container). Str("Line", msg.Log). Msg("Received notification from container") @@ -135,11 +135,11 @@ func (m *LogWatch) Listen() *LogNotification { } // OnMatch calling your testing hook on first match -func (m *LogWatch) OnMatch(f func(ln *LogNotification)) { +func (l *LogWatch) OnMatch(f func(ln *LogNotification)) { go func() { for { - msg := <-m.notifyTest - m.log.Warn(). + msg := <-l.notifyTest + l.log.Warn(). Str("Container", msg.Container). Str("Line", msg.Log). 
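// Illustration only (not part of this patch): the goroutine above forwards
// every matching log line to the supplied hook; logwatch_user_test.go in
// this patch uses it like:
//
//	lw.OnMatch(func(ln *logwatch.LogNotification) { notifications++ })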
Msg("Received notification from container") @@ -149,7 +149,7 @@ func (m *LogWatch) OnMatch(f func(ln *LogNotification)) { } // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer -func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string) error { +func (l *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string) error { name, err := container.Name(ctx) if err != nil { return err @@ -158,23 +158,23 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainer prefix = strings.Replace(prefix, "/", "", 1) enabledLogTargets := make([]LogTarget, 0) - for logTarget := range m.logTargetHandlers { + for logTarget := range l.logTargetHandlers { enabledLogTargets = append(enabledLogTargets, logTarget) } var cons *ContainerLogConsumer if prefix != "" { - cons = newContainerLogConsumer(m, name, prefix, enabledLogTargets...) + cons = newContainerLogConsumer(l, name, prefix, enabledLogTargets...) } else { - cons = newContainerLogConsumer(m, name, name, enabledLogTargets...) + cons = newContainerLogConsumer(l, name, name, enabledLogTargets...) } - m.log.Info(). + l.log.Info(). Str("Prefix", prefix). Str("Name", name). Msg("Connecting container logs") - m.consumers[name] = cons - m.containers = append(m.containers, container) + l.consumers[name] = cons + l.containers = append(l.containers, container) container.FollowOutput(cons) return container.StartLogProducer(ctx) } diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index 7352d2dfc..ff94b2d80 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -27,7 +27,6 @@ type TestCase struct { exitEarly bool mustNotifyList map[string][]*regexp.Regexp expectedNotifications map[string][]*logwatch.LogNotification - logTargets []logwatch.LogTarget } func getNotificationsAmount(m map[string][]*regexp.Regexp) int { From d87c27ac0cc682ec254d6475a48f12d98c93fa2f Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 19 Oct 2023 16:37:24 +0200 Subject: [PATCH 03/40] fix lints and unit tests (do not try to send anything to Loki) --- logwatch/logwatch.go | 40 +++++++++++++++++----------------- logwatch/logwatch_handlers.go | 14 ++++++------ logwatch/logwatch_test.go | 2 +- logwatch/logwatch_user_test.go | 2 +- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index ba8353f2f..905fad4ae 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -75,7 +75,7 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... 
return logWatch, nil } -func (l *LogWatch) validateLogTargets() error { +func (m *LogWatch) validateLogTargets() error { envLogTargets, err := getLogTargetsFromEnv() if err != nil { return err @@ -84,7 +84,7 @@ func (l *LogWatch) validateLogTargets() error { // check if all requested log targets are supported for _, wantedTarget := range envLogTargets { found := false - for knownTargets := range l.logTargetHandlers { + for knownTargets := range m.logTargetHandlers { if knownTargets == wantedTarget { found = true break @@ -97,7 +97,7 @@ func (l *LogWatch) validateLogTargets() error { } // deactivate known log targets that are not enabled - for knownTarget := range l.logTargetHandlers { + for knownTarget := range m.logTargetHandlers { wanted := false for _, wantedTarget := range envLogTargets { if knownTarget == wantedTarget { @@ -106,13 +106,13 @@ func (l *LogWatch) validateLogTargets() error { } } if !wanted { - l.log.Debug().Int("handler id", int(knownTarget)).Msg("Log target disabled") - delete(l.logTargetHandlers, knownTarget) + m.log.Debug().Int("handler id", int(knownTarget)).Msg("Log target disabled") + delete(m.logTargetHandlers, knownTarget) } } - if len(l.logTargetHandlers) == 0 { - l.log.Warn().Msg("No log targets enabled. LogWatch will not do anything") + if len(m.logTargetHandlers) == 0 { + m.log.Warn().Msg("No log targets enabled. LogWatch will not do anything") } return nil @@ -125,9 +125,9 @@ func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) Option { } // Listen listen for the next notification -func (l *LogWatch) Listen() *LogNotification { - msg := <-l.notifyTest - l.log.Warn(). +func (m *LogWatch) Listen() *LogNotification { + msg := <-m.notifyTest + m.log.Warn(). Str("Container", msg.Container). Str("Line", msg.Log). Msg("Received notification from container") @@ -135,11 +135,11 @@ func (l *LogWatch) Listen() *LogNotification { } // OnMatch calling your testing hook on first match -func (l *LogWatch) OnMatch(f func(ln *LogNotification)) { +func (m *LogWatch) OnMatch(f func(ln *LogNotification)) { go func() { for { - msg := <-l.notifyTest - l.log.Warn(). + msg := <-m.notifyTest + m.log.Warn(). Str("Container", msg.Container). Str("Line", msg.Log). Msg("Received notification from container") @@ -149,7 +149,7 @@ func (l *LogWatch) OnMatch(f func(ln *LogNotification)) { } // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer -func (l *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string) error { +func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string) error { name, err := container.Name(ctx) if err != nil { return err @@ -158,23 +158,23 @@ func (l *LogWatch) ConnectContainer(ctx context.Context, container testcontainer prefix = strings.Replace(prefix, "/", "", 1) enabledLogTargets := make([]LogTarget, 0) - for logTarget := range l.logTargetHandlers { + for logTarget := range m.logTargetHandlers { enabledLogTargets = append(enabledLogTargets, logTarget) } var cons *ContainerLogConsumer if prefix != "" { - cons = newContainerLogConsumer(l, name, prefix, enabledLogTargets...) + cons = newContainerLogConsumer(m, name, prefix, enabledLogTargets...) } else { - cons = newContainerLogConsumer(l, name, name, enabledLogTargets...) + cons = newContainerLogConsumer(m, name, name, enabledLogTargets...) } - l.log.Info(). + m.log.Info(). Str("Prefix", prefix). Str("Name", name). 
Msg("Connecting container logs") - l.consumers[name] = cons - l.containers = append(l.containers, container) + m.consumers[name] = cons + m.containers = append(m.containers, container) container.FollowOutput(cons) return container.StartLogProducer(ctx) } diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go index 741b53514..aca7b7cff 100644 --- a/logwatch/logwatch_handlers.go +++ b/logwatch/logwatch_handlers.go @@ -74,9 +74,9 @@ func (h FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro return nil } -func (h FileLogHandler) PrintLogLocation(l *LogWatch) { +func (h FileLogHandler) PrintLogLocation(m *LogWatch) { for testname, folder := range h.testLogFolders { - l.log.Info().Str("Test", testname).Str("Folder", folder).Msg("Logs saved to folder:") + m.log.Info().Str("Test", testname).Str("Folder", folder).Msg("Logs saved to folder:") } } @@ -125,13 +125,13 @@ func (h LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro return nil } -func (h LokiLogHandler) PrintLogLocation(l *LogWatch) { +func (h LokiLogHandler) PrintLogLocation(m *LogWatch) { queries := make([]GrafanaExploreQuery, 0) rangeFrom := time.Now() rangeTo := time.Now() - for _, c := range l.consumers { + for _, c := range m.consumers { if c.hasLogTarget(Loki) { queries = append(queries, GrafanaExploreQuery{ refId: c.name, @@ -145,11 +145,11 @@ func (h LokiLogHandler) PrintLogLocation(l *LogWatch) { Ts string `json:"ts"` } if err := json.Unmarshal([]byte(c.Messages[0]), &firstMsg); err != nil { - l.log.Error().Err(err).Str("container", c.name).Msg("Failed to unmarshal first log message") + m.log.Error().Err(err).Str("container", c.name).Msg("Failed to unmarshal first log message") } else { firstTs, err := time.Parse(time.RFC3339, firstMsg.Ts) if err != nil { - l.log.Error().Err(err).Str("container", c.name).Msg("Failed to parse first log message timestamp") + m.log.Error().Err(err).Str("container", c.name).Msg("Failed to parse first log message timestamp") } else { if firstTs.Before(rangeFrom) { rangeFrom = firstTs @@ -167,7 +167,7 @@ func (h LokiLogHandler) PrintLogLocation(l *LogWatch) { rangeTo: rangeTo.UnixMilli() + 60000, //just to make sure we get the last message }.getUrl() - l.log.Info().Str("URL", string(grafanaUrl)).Msg("Loki logs can be found in Grafana at (will only work when you unescape quotes):") + m.log.Info().Str("URL", string(grafanaUrl)).Msg("Loki logs can be found in Grafana at (will only work when you unescape quotes):") fmt.Printf("Loki logs can be found in Grafana at: %s\n", grafanaUrl) } diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index ff94b2d80..377aa0723 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -95,7 +95,7 @@ func startTestContainer(containerName string, msg string, amount int, intervalSe } func TestLogWatchDocker(t *testing.T) { - os.Setenv("LOGWATCH_LOG_TARGETS", "loki") + os.Setenv("LOGWATCH_LOG_TARGETS", "") tests := []TestCase{ { name: "should read exactly 10 streams (1 container)", diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go index cbd89550b..8b041ce09 100644 --- a/logwatch/logwatch_user_test.go +++ b/logwatch/logwatch_user_test.go @@ -65,7 +65,7 @@ func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch) error { /* That's how you use it */ func TestExampleUserInteraction(t *testing.T) { - os.Setenv("LOGWATCH_LOG_TARGETS", "loki") + os.Setenv("LOGWATCH_LOG_TARGETS", "") t.Run("sync API, block, receive one message", func(t *testing.T) { testData := 
testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} d, err := NewDeployment(testData) From d2b9366f34148975d4eba038a2f527219bba477c Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 19 Oct 2023 18:27:10 +0200 Subject: [PATCH 04/40] use string constants instead of a bitmap for log targets --- logwatch/logwatch.go | 10 +++++----- logwatch/logwatch_handlers.go | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 905fad4ae..355b82ff9 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -84,8 +84,8 @@ func (m *LogWatch) validateLogTargets() error { // check if all requested log targets are supported for _, wantedTarget := range envLogTargets { found := false - for knownTargets := range m.logTargetHandlers { - if knownTargets == wantedTarget { + for knownTarget := range m.logTargetHandlers { + if knownTarget == wantedTarget { found = true break } @@ -106,7 +106,7 @@ func (m *LogWatch) validateLogTargets() error { } } if !wanted { - m.log.Debug().Int("handler id", int(knownTarget)).Msg("Log target disabled") + m.log.Debug().Str("log target", string(knownTarget)).Msg("Log target disabled") delete(m.logTargetHandlers, knownTarget) } } @@ -276,7 +276,7 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { g.lw.log.Error().Err(err).Msg("Failed to handle log target") } } else { - g.lw.log.Warn().Int("handler id", int(logTarget)).Msg("No handler found for log target") + g.lw.log.Warn().Str("log target", string(logTarget)).Msg("No handler found for log target") } } } @@ -303,7 +303,7 @@ func (g *ContainerLogConsumer) FindMatch(l testcontainers.Log) int { func (g *ContainerLogConsumer) hasLogTarget(logTarget LogTarget) bool { for _, lt := range g.logTargets { - if lt&logTarget != 0 { + if lt == logTarget { return true } } diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go index aca7b7cff..4fd8d367d 100644 --- a/logwatch/logwatch_handlers.go +++ b/logwatch/logwatch_handlers.go @@ -12,11 +12,11 @@ import ( "github.com/smartcontractkit/wasp" ) -type LogTarget int +type LogTarget string const ( - Loki LogTarget = 1 << iota - File + Loki LogTarget = "loki" + File LogTarget = "file" ) type HandleLogTarget interface { From 5180c36191a2f2692ce1ea8ca7e5605ac1e30012 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Fri, 20 Oct 2023 10:22:15 +0200 Subject: [PATCH 05/40] fix lint --- logwatch/logwatch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 355b82ff9..cdc809825 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -92,7 +92,7 @@ func (m *LogWatch) validateLogTargets() error { } if !found { - return errors.Errorf("no handler found for log target: %d", wantedTarget) + return errors.Errorf("no handler found for log target: %s", wantedTarget) } } From 158ed87c5d2040ce21d8284571d5acb233c3896d Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 24 Oct 2023 10:11:04 +0200 Subject: [PATCH 06/40] store log location data on disk and add a command to print it --- .gitignore | 4 +- logwatch/logwatch.go | 20 +++- logwatch/logwatch_handlers.go | 103 +++++++++++---------- testsummary/cmd/internal/print_commands.go | 67 ++++++++++++++ testsummary/cmd/main.go | 26 ++++++ testsummary/summary.go | 76 +++++++++++++++ 6 files changed, 247 insertions(+), 49 deletions(-) create mode 100644 testsummary/cmd/internal/print_commands.go create mode 100644 testsummary/cmd/main.go create mode 100644 
testsummary/summary.go diff --git a/.gitignore b/.gitignore index 2ddd1b7d7..28d9977ae 100644 --- a/.gitignore +++ b/.gitignore @@ -54,4 +54,6 @@ docs/Gemfile.lock dist/ **/remote_runner_config.yaml -logs/ \ No newline at end of file +logs/ + +test_summary.json \ No newline at end of file diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index cdc809825..ed3a269c3 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -190,9 +190,27 @@ func (m *LogWatch) Shutdown() { } } +type LogWriter = func(testName string, name string, location interface{}) error + func (m *LogWatch) PrintLogTargetsLocations() { + m.SaveLogTargetsLocations(func(testName string, name string, location interface{}) error { + m.log.Info().Str("Test", testName).Str("Handler", name).Interface("Location", location).Msg("Log location") + return nil + }) +} + +func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { for _, handler := range m.logTargetHandlers { - handler.PrintLogLocation(m) + name := string(handler.GetTarget()) + location, err := handler.GetLogLocation(m.consumers) + if err != nil { + m.log.Error().Str("Handler", name).Err(err).Msg("Failed to get log location") + continue + } + + if err := writer(m.testName, name, location); err != nil { + m.log.Error().Str("Handler", name).Err(err).Msg("Failed to write log location") + } } } diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go index 4fd8d367d..7b0d7ce0b 100644 --- a/logwatch/logwatch_handlers.go +++ b/logwatch/logwatch_handlers.go @@ -21,36 +21,33 @@ const ( type HandleLogTarget interface { Handle(*ContainerLogConsumer, LogContent) error - PrintLogLocation(*LogWatch) + // PrintLogLocation(*LogWatch) + GetLogLocation(map[string]*ContainerLogConsumer) (string, error) + GetTarget() LogTarget } func getDefaultLogHandlers() map[LogTarget]HandleLogTarget { handlers := make(map[LogTarget]HandleLogTarget) - handlers[Loki] = LokiLogHandler{ - shouldSkipLogging: make(map[string]bool), - } - handlers[File] = FileLogHandler{ - testLogFolders: make(map[string]string), - shouldSkipLogging: make(map[string]bool), - } + handlers[Loki] = &LokiLogHandler{} + handlers[File] = &FileLogHandler{} return handlers } // streams logs to local files type FileLogHandler struct { - testLogFolders map[string]string - shouldSkipLogging map[string]bool + logFolder string + shouldSkipLogging bool } -func (h FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { - if val, ok := h.shouldSkipLogging[content.TestName]; val && ok { +func (h *FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { + if h.shouldSkipLogging { return nil } folder, err := h.getOrCreateLogFolder(content.TestName) if err != nil { - h.shouldSkipLogging[content.TestName] = true + h.shouldSkipLogging = true return errors.Wrap(err, "failed to create logs folder. File logging stopped") } @@ -58,7 +55,7 @@ func (h FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro logFileName := filepath.Join(folder, fmt.Sprintf("%s.log", content.ContainerName)) logFile, err := os.OpenFile(logFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { - h.shouldSkipLogging[content.TestName] = true + h.shouldSkipLogging = true return errors.Wrap(err, "failed to open log file. 
File logging stopped") } @@ -66,7 +63,7 @@ func (h FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro defer logFile.Close() if _, err := logFile.WriteString(string(content.Content)); err != nil { - h.shouldSkipLogging[content.TestName] = true + h.shouldSkipLogging = true return errors.Wrap(err, "failed to write to log file. File logging stopped") } @@ -74,33 +71,34 @@ func (h FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro return nil } -func (h FileLogHandler) PrintLogLocation(m *LogWatch) { - for testname, folder := range h.testLogFolders { - m.log.Info().Str("Test", testname).Str("Folder", folder).Msg("Logs saved to folder:") - } +func (h FileLogHandler) GetLogLocation(_ map[string]*ContainerLogConsumer) (string, error) { + return h.logFolder, nil } -func (h FileLogHandler) getOrCreateLogFolder(testname string) (string, error) { - var folder string - if _, ok := h.testLogFolders[testname]; !ok { - folder = fmt.Sprintf("./logs/%s-%s", testname, time.Now().Format("2006-01-02T15-04-05")) +func (h *FileLogHandler) getOrCreateLogFolder(testname string) (string, error) { + if h.logFolder == "" { + folder := fmt.Sprintf("./logs/%s-%s", testname, time.Now().Format("2006-01-02T15-04-05")) if err := os.MkdirAll(folder, os.ModePerm); err != nil { return "", err } - h.testLogFolders[testname] = folder + h.logFolder = folder } - folder = h.testLogFolders[testname] - return folder, nil + return h.logFolder, nil +} + +func (h FileLogHandler) GetTarget() LogTarget { + return File } // streams logs to Loki type LokiLogHandler struct { - shouldSkipLogging map[string]bool + grafanaUrl string + shouldSkipLogging bool } -func (h LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { - if val, ok := h.shouldSkipLogging[content.TestName]; val && ok { +func (h *LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { + if h.shouldSkipLogging { c.lw.log.Warn().Str("Test", content.TestName).Msg("Skipping pushing logs to Loki for this test") return nil } @@ -109,7 +107,7 @@ func (h LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro loki, err := wasp.NewLokiClient(wasp.NewEnvLokiConfig()) if err != nil { c.lw.log.Error().Err(err).Msg("Failed to create Loki client") - h.shouldSkipLogging[content.TestName] = true + h.shouldSkipLogging = true return err } @@ -125,13 +123,17 @@ func (h LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) erro return nil } -func (h LokiLogHandler) PrintLogLocation(m *LogWatch) { +func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsumer) (string, error) { + if h.grafanaUrl != "" { + return h.grafanaUrl, nil + } + queries := make([]GrafanaExploreQuery, 0) rangeFrom := time.Now() - rangeTo := time.Now() + rangeTo := time.Now().Add(time.Minute) //just to make sure we get the last message - for _, c := range m.consumers { + for _, c := range consumers { if c.hasLogTarget(Loki) { queries = append(queries, GrafanaExploreQuery{ refId: c.name, @@ -144,30 +146,37 @@ func (h LokiLogHandler) PrintLogLocation(m *LogWatch) { var firstMsg struct { Ts string `json:"ts"` } + if err := json.Unmarshal([]byte(c.Messages[0]), &firstMsg); err != nil { - m.log.Error().Err(err).Str("container", c.name).Msg("Failed to unmarshal first log message") - } else { - firstTs, err := time.Parse(time.RFC3339, firstMsg.Ts) - if err != nil { - m.log.Error().Err(err).Str("container", c.name).Msg("Failed to parse first log message timestamp") - } else { - if 
firstTs.Before(rangeFrom) {
+				rangeFrom = firstTs
+			}
+		}
+	}
+
+	if len(queries) == 0 {
+		return "", errors.New("no Loki consumers found")
+	}
+
+	h.grafanaUrl = GrafanaExploreUrl{
+		baseurl:    os.Getenv("GRAFANA_URL"),
+		datasource: os.Getenv("GRAFANA_DATASOURCE"),
+		queries:    queries,
+		rangeFrom:  rangeFrom.UnixMilli(),
+		rangeTo:    rangeTo.UnixMilli(),
+	}.getUrl()
+
+	return h.grafanaUrl, nil
+}
+
+func (h LokiLogHandler) GetTarget() LogTarget {
+	return Loki
+}
diff --git a/testsummary/cmd/internal/print_commands.go b/testsummary/cmd/internal/print_commands.go
new file mode 100644
index 000000000..4783d2b12
--- /dev/null
+++ b/testsummary/cmd/internal/print_commands.go
@@ -0,0 +1,67 @@
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	ts "github.com/smartcontractkit/chainlink-testing-framework/testsummary"
+)
+
+var PrintKeyCmd = &cobra.Command{
+	Use:   "print-key [key]",
+	Short: "Prints all values for the given key from the test summary file",
+	RunE:  printKeyRunE,
+}
+
+func init() {
+	PrintKeyCmd.Flags().Bool("json", true, "print as json")
+	PrintKeyCmd.Flags().Bool("md", true, "print as markdown")
+}
+
+func printKeyRunE(cmd *cobra.Command, args []string) error {
+	if len(args) != 1 || args[0] == "" {
+		return cmd.Help()
+	}
+
+	key := strings.ToLower(args[0])
+
+	f, err := os.OpenFile(ts.SUMMARY_FILE, os.O_RDONLY, 0444)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	fc, err := io.ReadAll(f)
+	if err != nil {
+		return err
+	}
+
+	var sk ts.SummaryKeys
+	err = json.Unmarshal(fc, &sk)
+	if err != nil {
+		return err
+	}
+
+	if entry, ok := sk[key]; ok {
+		if cmd.Flag("json").Value.String() == "true" {
+			fmt.Println(prettyPrint(entry))
+		} else if cmd.Flag("md").Value.String() == "true" {
+			panic("not implemented")
+		} else {
+			fmt.Printf("%+v\n", entry)
+		}
+		return nil
+	}
+
+	return fmt.Errorf("no entry for key '%s' found", args[0])
+}
+
+func prettyPrint(i interface{}) string {
+	s, _ := json.MarshalIndent(i, "", "\t")
+	return string(s)
+}
diff --git a/testsummary/cmd/main.go b/testsummary/cmd/main.go
new file mode 100644
index 000000000..154e2da0d
--- /dev/null
+++ b/testsummary/cmd/main.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"github.com/smartcontractkit/chainlink-testing-framework/testsummary/cmd/internal"
+)
+
+var rootCmd = &cobra.Command{
+	Use:   "test-summary",
+	Short: "Test summary printer",
+}
+
+func init() {
+	rootCmd.AddCommand(internal.PrintKeyCmd)
+}
+
+func main() {
+	if err := rootCmd.Execute(); err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
diff --git a/testsummary/summary.go b/testsummary/summary.go
new file mode 100644
index 000000000..ff58eb578
--- /dev/null
+++ b/testsummary/summary.go
@@ -0,0 +1,76 @@
+package testsummary
+
+import (
+	"encoding/json"
+	"io"
+	"os"
+	"sync"
+
+	
"github.com/pkg/errors" +) + +const SUMMARY_FILE = "test_summary.json" + +type SummaryKeys map[string][]KeyContent + +type KeyContent struct { + TestName string `json:"test_name"` + Value string `json:"value"` +} + +var mu sync.Mutex + +// TODO in future allow value to be also []string or map[string]string? +func AddEntry(testName, key string, value interface{}) error { + mu.Lock() + defer mu.Unlock() + + if _, ok := value.(string); !ok { + return errors.Errorf("type '%T' not supported", value) + } + strValue := value.(string) + + f, err := os.OpenFile(SUMMARY_FILE, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fc, err := io.ReadAll(f) + if err != nil { + return err + } + + var entries SummaryKeys + err = json.Unmarshal(fc, &entries) + if err != nil { + return err + } + + if entry, ok := entries[key]; ok { + testFound := false + for idx, testValue := range entry { + // overwrite if entry for test exists + if testValue.TestName == testName { + entry[idx].Value = strValue + testFound = true + break + } + } + + // add new entry to existing key if no entry for test exists + if !testFound { + entries[key] = append(entries[key], KeyContent{TestName: testName, Value: strValue}) + } + } else { + entries[key] = []KeyContent{{TestName: testName, Value: strValue}} + } + + encoder := json.NewEncoder(f) + err = encoder.Encode(entries) + if err != nil { + return err + } + + return nil +} From f6086c998f31e79a5363c5b3b6525ad0b3b28162 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 24 Oct 2023 17:52:29 +0200 Subject: [PATCH 07/40] update go.mod --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 9c27e20ed..66949971f 100644 --- a/go.mod +++ b/go.mod @@ -225,7 +225,7 @@ require ( github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect From ccc5ba0a169740a3dbaf3a864cbc943f3cde898c Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 28 Nov 2023 09:00:21 +0100 Subject: [PATCH 08/40] logwatch that retries getting docker logs if for some reason it fails --- docker/test_env/env_component_test.go | 2 +- go.mod | 15 ++- go.sum | 44 ++++++--- logwatch/logwatch.go | 135 ++++++++++++++++++++++---- logwatch/logwatch_user_test.go | 2 + utils/retries/retry_functions.go | 8 ++ 6 files changed, 166 insertions(+), 40 deletions(-) create mode 100644 utils/retries/retry_functions.go diff --git a/docker/test_env/env_component_test.go b/docker/test_env/env_component_test.go index 401b7a26c..8d2de628f 100644 --- a/docker/test_env/env_component_test.go +++ b/docker/test_env/env_component_test.go @@ -27,7 +27,7 @@ func followLogs(t *testing.T, c testcontainers.Container) *TestLogConsumer { } go func() { c.FollowOutput(consumer) - err := c.StartLogProducer(testcontext.Get(t)) + err := c.StartLogProducer(testcontext.Get(t), time.Duration(5*time.Second)) require.NoError(t, err) }() return consumer diff --git a/go.mod b/go.mod index 2433dfc91..2d67b07db 100644 --- a/go.mod +++ b/go.mod @@ -29,18 +29,24 @@ require ( ) require ( + github.com/Microsoft/hcsshim v0.11.1 // indirect github.com/allegro/bigcache v1.2.1 // indirect github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // 
indirect github.com/cespare/cp v1.1.1 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect github.com/hashicorp/go-version v1.6.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/pyroscope-io/client v0.7.1 // indirect github.com/rs/cors v1.8.3 // indirect github.com/sercand/kuberesolver/v4 v4.0.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect + github.com/shirou/gopsutil/v3 v3.23.9 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect ) @@ -48,6 +54,7 @@ require ( replace ( // replicating the replace directive on cosmos SDK github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + github.com/testcontainers/testcontainers-go => github.com/Tofel/testcontainers-go v0.0.0-20231128120608-9b1cc6b8c8fa k8s.io/api => k8s.io/api v0.25.11 k8s.io/apimachinery => k8s.io/apimachinery v0.25.11 @@ -88,7 +95,7 @@ require ( github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/redact v1.1.3 // indirect - github.com/containerd/containerd v1.7.3 // indirect + github.com/containerd/containerd v1.7.7 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/dockercfg v0.3.1 // indirect @@ -96,7 +103,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v24.0.5+incompatible // indirect + github.com/docker/docker v24.0.6+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect @@ -190,7 +197,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect @@ -203,7 +210,7 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect diff --git a/go.sum b/go.sum index 8385484b4..2a14fb26f 100644 --- a/go.sum +++ b/go.sum @@ -391,8 +391,8 @@ cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -434,12 +434,14 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek= -github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= +github.com/Microsoft/hcsshim v0.11.1 h1:hJ3s7GbWlGK4YVV92sO88BQSyF4ZLVy7/awqOlPxFbA= +github.com/Microsoft/hcsshim v0.11.1/go.mod h1:nFJmaO4Zr5Y7eADdFOpYswDDlNVbvcIJJNJLECr5JQg= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/Tofel/testcontainers-go v0.0.0-20231128120608-9b1cc6b8c8fa h1:BZFDvIeTQQRpN12FpTiuDL92BqNYuHsW16N2Oy4af/U= +github.com/Tofel/testcontainers-go v0.0.0-20231128120608-9b1cc6b8c8fa/go.mod h1:ICriE9bLX5CLxL9OFQ2N+2N+f+803LNJ1utJb1+Inx0= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= @@ -552,8 +554,10 @@ github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5w github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/containerd/containerd v1.7.3 h1:cKwYKkP1eTj54bP3wCdXXBymmKRQMrWjkLSWZZJDa8o= -github.com/containerd/containerd v1.7.3/go.mod h1:32FOM4/O0RkNg7AjQj3hDzN9cUGtu+HMvaKUNiqCZB8= +github.com/containerd/containerd v1.7.7 h1:QOC2K4A42RQpcrZyptP6z9EJZnlHfHJUfZrAAHe15q4= +github.com/containerd/containerd v1.7.7/go.mod h1:3c4XZv6VeT9qgf9GMTxNTMFxGJrGpI2vz1yk4ye+YY8= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod 
h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -571,8 +575,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -591,8 +593,8 @@ github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVo github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= -github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= +github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1117,6 +1119,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1182,8 +1186,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= 
-github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= -github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= @@ -1237,8 +1241,8 @@ github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= -github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= @@ -1281,6 +1285,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/alertmanager v0.25.1 h1:LGBNMspOfv8h7brb+LWj2wnwBCg2ZuuKWTh6CAVw2/Y= github.com/prometheus/alertmanager v0.25.1/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1369,6 +1375,12 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= +github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1421,8 +1433,6 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs= -github.com/testcontainers/testcontainers-go v0.23.0/go.mod h1:3gzuZfb7T9qfcH2pHpV4RLlWrPjeWNQah6XlYQ32c4I= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= @@ -1766,6 +1776,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1823,6 +1834,7 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index ed3a269c3..71af4a040 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -6,9 +6,11 @@ import ( "regexp" "strings" "testing" + "time" "github.com/pkg/errors" "github.com/rs/zerolog" + "github.com/smartcontractkit/chainlink-testing-framework/utils/retries" "github.com/smartcontractkit/wasp" "github.com/testcontainers/testcontainers-go" @@ -27,14 +29,17 @@ type LogNotification struct { // LogWatch is a test helper struct to monitor docker container logs for some patterns // and push their logs into Loki for further analysis type LogWatch struct { - testName string - log zerolog.Logger - loki *wasp.LokiClient - patterns map[string][]*regexp.Regexp - notifyTest chan *LogNotification - containers []testcontainers.Container - consumers 
map[string]*ContainerLogConsumer - logTargetHandlers map[LogTarget]HandleLogTarget + testName string + log zerolog.Logger + loki *wasp.LokiClient + patterns map[string][]*regexp.Regexp + notifyTest chan *LogNotification + containers []testcontainers.Container + consumers map[string]*ContainerLogConsumer + logTargetHandlers map[LogTarget]HandleLogTarget + logListeningDone chan struct{} + logProducerTimeout time.Duration + logProducerTimeoutRetryLimit int // -1 for infinite retries } type LogContent struct { @@ -56,12 +61,15 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... } logWatch := &LogWatch{ - testName: testName, - log: l, - patterns: patterns, - notifyTest: make(chan *LogNotification, 10000), - consumers: make(map[string]*ContainerLogConsumer, 0), - logTargetHandlers: getDefaultLogHandlers(), + testName: testName, + log: l, + patterns: patterns, + notifyTest: make(chan *LogNotification, 10000), + consumers: make(map[string]*ContainerLogConsumer, 0), + logTargetHandlers: getDefaultLogHandlers(), + logListeningDone: make(chan struct{}, 1), + logProducerTimeout: time.Duration(10 * time.Second), + logProducerTimeoutRetryLimit: 10, } for _, option := range options { @@ -124,6 +132,18 @@ func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) Option { } } +func WithLogProducerTimeout(timeout time.Duration) Option { + return func(lw *LogWatch) { + lw.logProducerTimeout = timeout + } +} + +func WithLogProducerTimeoutRetryLimit(retryLimit int) Option { + return func(lw *LogWatch) { + lw.logProducerTimeoutRetryLimit = retryLimit + } +} + // Listen listen for the next notification func (m *LogWatch) Listen() *LogNotification { msg := <-m.notifyTest @@ -172,22 +192,97 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainer m.log.Info(). Str("Prefix", prefix). Str("Name", name). + Str("Timeout", m.logProducerTimeout.String()). Msg("Connecting container logs") m.consumers[name] = cons m.containers = append(m.containers, container) container.FollowOutput(cons) - return container.StartLogProducer(ctx) + + err = container.StartLogProducer(ctx, m.logProducerTimeout) + + go func(done chan struct{}, timeout time.Duration, retryLimit int) { + defer m.log.Info().Str("Container name", name).Msg("Log listener stopped") + currentAttempt := 0 + + var shouldRetry = func() bool { + if retryLimit == -1 { + return true + } + + if currentAttempt < retryLimit { + currentAttempt++ + return true + } + + return false + } + + for { + select { + case err = <-container.GetLogProducerErrorChannel(): + if err != nil { + m.log.Error(). + Str("Name", name). + Err(err). + Msg("Log producer errored") + if shouldRetry() { + backoff := retries.Fibonacci(currentAttempt) + timeout = timeout + time.Duration(backoff)*time.Millisecond + m.log.Info(). + Str("Prefix", prefix). + Str("Name", name). + Str("Timeout", timeout.String()). + Msgf("Retrying connection and listening to container logs. Attempt %d/%d", currentAttempt, retryLimit) + err = container.StartLogProducer(ctx, timeout) + if err != nil { + m.log.Error().Err(err).Msg("Log producer was already running. This should never happen. Exiting") + return + } + m.log.Info(). + Str("Name", name). + Msg("Started new log producer") + } else { + m.log.Error(). + Err(err). + Str("Name", name). + Msg("Used all attempts to listen to container logs. 
Won't try again") + return + } + } + case <-done: + return + } + } + }(m.logListeningDone, m.logProducerTimeout, m.logProducerTimeoutRetryLimit) + + return err } // Shutdown disconnects all containers, stops notifications -func (m *LogWatch) Shutdown() { +func (m *LogWatch) Shutdown() error { + defer close(m.logListeningDone) + var err error for _, c := range m.containers { - m.DisconnectContainer(c) + singleErr := m.DisconnectContainer(c) + if singleErr != nil { + ctx := context.Background() + name, _ := c.Name(ctx) + m.log.Error(). + Err(err). + Str("Name", name). + Msg("Failed to disconnect container") + + err = errors.Wrap(singleErr, "failed to disconnect container") + } } if m.loki != nil { m.loki.Stop() } + + m.logListeningDone <- struct{}{} + + return err } type LogWriter = func(testName string, name string, location interface{}) error @@ -215,11 +310,13 @@ func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { } // DisconnectContainer disconnects the particular container -func (m *LogWatch) DisconnectContainer(container testcontainers.Container) { +func (m *LogWatch) DisconnectContainer(container testcontainers.Container) error { if container.IsRunning() { m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container") - _ = container.StopLogProducer() + return container.StopLogProducer() } + + return nil } // ContainerLogs return all logs for the particular container diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go index 1384e6337..69bcb176b 100644 --- a/logwatch/logwatch_user_test.go +++ b/logwatch/logwatch_user_test.go @@ -67,6 +67,7 @@ func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch) error { func TestExampleUserInteraction(t *testing.T) { os.Setenv("LOGWATCH_LOG_TARGETS", "") t.Run("sync API, block, receive one message", func(t *testing.T) { + ctx := context.Background() testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} d, err := NewDeployment(ctx, testData) // nolint @@ -87,6 +88,7 @@ func TestExampleUserInteraction(t *testing.T) { require.NotEmpty(t, match) }) t.Run("async API, execute some logic on match", func(t *testing.T) { + ctx := context.Background() testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD\n", "E\nF\nG\nH\n"}} notifications := 0 d, err := NewDeployment(ctx, testData) diff --git a/utils/retries/retry_functions.go b/utils/retries/retry_functions.go new file mode 100644 index 000000000..1754207cf --- /dev/null +++ b/utils/retries/retry_functions.go @@ -0,0 +1,8 @@ +package retries + +func Fibonacci(n int) int { + if n <= 1 { + return n + } + return Fibonacci(n-1) + Fibonacci(n-2) +} From 9dea3552b3ec9cf3a033879e7c78dc4d40bc2a40 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 30 Nov 2023 10:02:27 +0100 Subject: [PATCH 09/40] tests: WIP --- .../ethereum_network_e4eccee4.json | 35 +++ go.mod | 3 +- go.sum | 2 - logwatch/__debug_bin3072951826 | 0 logwatch/logwatch.go | 95 ++++-- logwatch/logwatch_handlers.go | 1 - logwatch/logwatch_test.go | 273 +++++++++++++++++- logwatch/logwatch_user_test.go | 139 ++++++++- 8 files changed, 515 insertions(+), 33 deletions(-) create mode 100644 .private_chains/ethereum_network_e4eccee4.json create mode 100644 logwatch/__debug_bin3072951826 diff --git a/.private_chains/ethereum_network_e4eccee4.json b/.private_chains/ethereum_network_e4eccee4.json new file mode 100644 index 000000000..084e9a042 --- /dev/null +++ b/.private_chains/ethereum_network_e4eccee4.json @@ -0,0 +1,35 @@ +{ + 
"consensus_type": "pos", + "consensus_layer": "prysm", + "execution_layer": "geth", + "docker_network_names": [ + "network-8bf1e6e0-c548-4cd8-b450-24ce360ca1d4" + ], + "containers": [ + { + "container_name": "geth2-7b45d1d1", + "container_type": "geth2" + }, + { + "container_name": "prysm-beacon-chain-3f00fc81", + "container_type": "prysm-beacon" + }, + { + "container_name": "prysm-validator-d799b911", + "container_type": "prysm-validator" + } + ], + "wait_for_finalization": false, + "generated_data_host_dir": "/var/folders/q4/975jb0kx3bb2x_yy0_7mf2y00000gn/T/custom_config_data1373575570", + "val_keys_dir": "/var/folders/q4/975jb0kx3bb2x_yy0_7mf2y00000gn/T/val_keys1464778088", + "ethereum_chain_config": { + "slots_per_epoch": 6, + "seconds_per_slot": 2, + "genesis_delay": 15, + "validator_count": 8, + "chain_id": 1337, + "addresses_to_fund": [ + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" + ] + } +} \ No newline at end of file diff --git a/go.mod b/go.mod index 2d67b07db..1294a48ee 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,8 @@ require ( replace ( // replicating the replace directive on cosmos SDK github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/testcontainers/testcontainers-go => github.com/Tofel/testcontainers-go v0.0.0-20231128120608-9b1cc6b8c8fa + // github.com/testcontainers/testcontainers-go => github.com/Tofel/testcontainers-go v0.0.0-20231128125734-9878fec1b450 + github.com/testcontainers/testcontainers-go => ../testcontainers-go k8s.io/api => k8s.io/api v0.25.11 k8s.io/apimachinery => k8s.io/apimachinery v0.25.11 diff --git a/go.sum b/go.sum index 2a14fb26f..72a42fb0b 100644 --- a/go.sum +++ b/go.sum @@ -440,8 +440,6 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/Tofel/testcontainers-go v0.0.0-20231128120608-9b1cc6b8c8fa h1:BZFDvIeTQQRpN12FpTiuDL92BqNYuHsW16N2Oy4af/U= -github.com/Tofel/testcontainers-go v0.0.0-20231128120608-9b1cc6b8c8fa/go.mod h1:ICriE9bLX5CLxL9OFQ2N+2N+f+803LNJ1utJb1+Inx0= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= diff --git a/logwatch/__debug_bin3072951826 b/logwatch/__debug_bin3072951826 new file mode 100644 index 000000000..e69de29bb diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 71af4a040..b4644a0b5 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -5,16 +5,17 @@ import ( "os" "regexp" "strings" + "sync" "testing" "time" "github.com/pkg/errors" "github.com/rs/zerolog" - "github.com/smartcontractkit/chainlink-testing-framework/utils/retries" "github.com/smartcontractkit/wasp" "github.com/testcontainers/testcontainers-go" "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink-testing-framework/utils/retries" ) const NO_TEST = "no_test" @@ -26,6 +27,16 @@ type LogNotification struct { Log string } +type LogProducingContainer interface { + Name(ctx context.Context) (string, error) + FollowOutput(consumer 
testcontainers.LogConsumer) + StartLogProducer(ctx context.Context, timeout time.Duration) error + StopLogProducer() error + GetLogProducerErrorChannel() <-chan error + IsRunning() bool + GetContainerID() string +} + // LogWatch is a test helper struct to monitor docker container logs for some patterns // and push their logs into Loki for further analysis type LogWatch struct { @@ -34,12 +45,14 @@ type LogWatch struct { loki *wasp.LokiClient patterns map[string][]*regexp.Regexp notifyTest chan *LogNotification - containers []testcontainers.Container + containers []LogProducingContainer consumers map[string]*ContainerLogConsumer logTargetHandlers map[LogTarget]HandleLogTarget + enabledLogTargets []LogTarget logListeningDone chan struct{} logProducerTimeout time.Duration logProducerTimeoutRetryLimit int // -1 for infinite retries + acceptMutex sync.Mutex } type LogContent struct { @@ -60,6 +73,11 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... testName = t.Name() } + envLogTargets, err := getLogTargetsFromEnv() + if err != nil { + return nil, err + } + logWatch := &LogWatch{ testName: testName, log: l, @@ -70,6 +88,7 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... logListeningDone: make(chan struct{}, 1), logProducerTimeout: time.Duration(10 * time.Second), logProducerTimeoutRetryLimit: 10, + enabledLogTargets: envLogTargets, } for _, option := range options { @@ -84,13 +103,8 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... } func (m *LogWatch) validateLogTargets() error { - envLogTargets, err := getLogTargetsFromEnv() - if err != nil { - return err - } - // check if all requested log targets are supported - for _, wantedTarget := range envLogTargets { + for _, wantedTarget := range m.enabledLogTargets { found := false for knownTarget := range m.logTargetHandlers { if knownTarget == wantedTarget { @@ -107,7 +121,7 @@ func (m *LogWatch) validateLogTargets() error { // deactivate known log targets that are not enabled for knownTarget := range m.logTargetHandlers { wanted := false - for _, wantedTarget := range envLogTargets { + for _, wantedTarget := range m.enabledLogTargets { if knownTarget == wantedTarget { wanted = true break @@ -120,7 +134,7 @@ func (m *LogWatch) validateLogTargets() error { } if len(m.logTargetHandlers) == 0 { - m.log.Warn().Msg("No log targets enabled. LogWatch will not do anything") + m.log.Warn().Msg("No log targets enabled. 
LogWatch will not persist any logs") } return nil @@ -132,6 +146,12 @@ func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) Option { } } +func WithLogTarget(logTarget LogTarget) Option { + return func(lw *LogWatch) { + lw.enabledLogTargets = append(lw.enabledLogTargets, logTarget) + } +} + func WithLogProducerTimeout(timeout time.Duration) Option { return func(lw *LogWatch) { lw.logProducerTimeout = timeout @@ -169,7 +189,7 @@ func (m *LogWatch) OnMatch(f func(ln *LogNotification)) { } // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer -func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainers.Container, prefix string) error { +func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingContainer, prefix string) error { name, err := container.Name(ctx) if err != nil { return err @@ -219,35 +239,54 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainer for { select { - case err = <-container.GetLogProducerErrorChannel(): + case err := <-container.GetLogProducerErrorChannel(): if err != nil { m.log.Error(). - Str("Name", name). + Str("Container name", name). Err(err). Msg("Log producer errored") if shouldRetry() { backoff := retries.Fibonacci(currentAttempt) - timeout = timeout + time.Duration(backoff)*time.Millisecond + timeout = timeout + time.Duration(backoff)*time.Second m.log.Info(). Str("Prefix", prefix). - Str("Name", name). + Str("Container name", name). Str("Timeout", timeout.String()). Msgf("Retrying connection and listening to container logs. Attempt %d/%d", currentAttempt, retryLimit) - err = container.StartLogProducer(ctx, timeout) - if err != nil { - m.log.Error().Err(err).Msg("Log producer was already running. This should never happen. Exiting") + // we will request all logs from the start, when we start log producer, so we need to remove ones already saved to avoid duplicates + // in the unlikely case that log producer fails to start we will copy the messages, so that at least some logs are salvaged + messagesCopy := append([]string{}, m.consumers[name].Messages...) + m.consumers[name].Messages = make([]string, 0) + m.log.Warn().Msgf("Consumer messages: %d", len(m.consumers[name].Messages)) + startTime := time.Now() + timedout := false + for container.StartLogProducer(ctx, timeout) != nil { + if time.Since(startTime) >= 5*time.Second { + timedout = true + break + } + m.log.Info().Msg("Waiting for log producer to stop") + time.Sleep(500 * time.Millisecond) + } + if timedout { + m.log.Error(). + Err(err). + Msg("Previously running log producer couldn't be stopped. Won't try again") + m.consumers[name].Messages = messagesCopy return } m.log.Info(). - Str("Name", name). + Str("Container name", name). Msg("Started new log producer") } else { m.log.Error(). Err(err). - Str("Name", name). + Str("Container name", name). Msg("Used all attempts to listen to container logs. 
Won't try again") return } + + time.Sleep(500 * time.Millisecond) } case <-done: return @@ -259,14 +298,13 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container testcontainer } // Shutdown disconnects all containers, stops notifications -func (m *LogWatch) Shutdown() error { +func (m *LogWatch) Shutdown(context context.Context) error { defer close(m.logListeningDone) var err error for _, c := range m.containers { singleErr := m.DisconnectContainer(c) if singleErr != nil { - ctx := context.Background() - name, _ := c.Name(ctx) + name, _ := c.Name(context) m.log.Error(). Err(err). Str("Name", name). @@ -310,7 +348,7 @@ func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { } // DisconnectContainer disconnects the particular container -func (m *LogWatch) DisconnectContainer(container testcontainers.Container) error { +func (m *LogWatch) DisconnectContainer(container LogProducingContainer) error { if container.IsRunning() { m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container") return container.StopLogProducer() @@ -321,6 +359,8 @@ func (m *LogWatch) DisconnectContainer(container testcontainers.Container) error // ContainerLogs return all logs for the particular container func (m *LogWatch) ContainerLogs(name string) []string { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() if _, ok := m.consumers[name]; !ok { return []string{} } @@ -330,6 +370,8 @@ func (m *LogWatch) ContainerLogs(name string) []string { // AllLogs returns all logs for all containers func (m *LogWatch) AllLogs() []string { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() logs := make([]string, 0) for _, l := range m.consumers { logs = append(logs, l.Messages...) @@ -339,6 +381,8 @@ func (m *LogWatch) AllLogs() []string { // PrintAll prints all logs for all containers connected func (m *LogWatch) PrintAll() { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() for cname, c := range m.consumers { for _, msg := range c.Messages { m.log.Info(). 
@@ -373,6 +417,9 @@ func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string,

 // Accept accepts the log message from particular container
 func (g *ContainerLogConsumer) Accept(l testcontainers.Log) {
+	g.lw.acceptMutex.Lock()
+	defer g.lw.acceptMutex.Unlock()
+	g.lw.log.Info().Msgf("Received log message: %s", string(l.Content))
 	g.Messages = append(g.Messages, string(l.Content))
 	matches := g.FindMatch(l)
 	for i := 0; i < matches; i++ {
diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go
index 7b0d7ce0b..b9930a839 100644
--- a/logwatch/logwatch_handlers.go
+++ b/logwatch/logwatch_handlers.go
@@ -21,7 +21,6 @@ const (

 type HandleLogTarget interface {
 	Handle(*ContainerLogConsumer, LogContent) error
-	// PrintLogLocation(*LogWatch)
 	GetLogLocation(map[string]*ContainerLogConsumer) (string, error)
 	GetTarget() LogTarget
 }
diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go
index 80f02fc07..ab564a9d1 100644
--- a/logwatch/logwatch_test.go
+++ b/logwatch/logwatch_test.go
@@ -3,15 +3,16 @@ package logwatch_test

 import (
 	"context"
 	"fmt"
-	"os"
 	"reflect"
 	"regexp"
 	"strconv"
+	"sync"
 	"testing"
 	"time"

 	"github.com/davecgh/go-spew/spew"
 	"github.com/google/uuid"
+	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 	"github.com/testcontainers/testcontainers-go"

@@ -77,7 +78,7 @@ func startTestContainer(ctx context.Context, containerName string, msg string, a
 	} else {
 		cmd = []string{"bash", "-c",
 			fmt.Sprintf(
-				"for i in {1..%d}; do sleep %.2f; echo '%s'; done; while true; do sleep 1; done",
+				"for i in {1..%d}; do sleep %.2f; echo \"%s-$i\"; done; while true; do sleep 1; done",
 				amount,
 				intervalSeconds,
 				msg,
@@ -95,7 +96,6 @@ func startTestContainer(ctx context.Context, containerName string, msg string, a
 }

 func TestLogWatchDocker(t *testing.T) {
-	os.Setenv("LOGWATCH_LOG_TARGETS", "")
 	tests := []TestCase{
 		{
 			name: "should read exactly 10 streams (1 container)",
@@ -211,7 +211,9 @@ func TestLogWatchDocker(t *testing.T) {
 			// this code terminates the containers properly
 			for _, c := range containers {
 				if !tc.exitEarly {
-					lw.DisconnectContainer(c)
+					if err := lw.DisconnectContainer(c); err != nil {
+						t.Fatalf("failed to disconnect container: %s", err.Error())
+					}
 					if err := c.Terminate(ctx); err != nil {
 						t.Fatalf("failed to terminate container: %s", err.Error())
 					}
@@ -221,3 +223,266 @@
 		}
 		})
 	}
 }
+
+func TestLogWatchConnectWithDelayDocker(t *testing.T) {
+	t.Parallel()
+	ctx := testcontext.Get(t)
+	containerName := fmt.Sprintf("%s-container-%s", "TestLogWatchConnectWithDelayDocker", uuid.NewString())
+	message := "message"
+	interval := float64(1)
+	amount := 10
+
+	// connect only after the container has been producing logs for a while; all of its logs should still be fetched from the start
+	lw, err := logwatch.NewLogWatch(t, nil)
+	require.NoError(t, err)
+	container, err := startTestContainer(ctx, containerName, message, amount, interval, false)
+	require.NoError(t, err)
+	name, err := container.Name(ctx)
+	require.NoError(t, err)
+
+	time.Sleep(5 * time.Second)
+
+	err = lw.ConnectContainer(context.Background(), container, name)
+	require.NoError(t, err)
+
+	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
+	lw.PrintAll()
+
+	require.Len(t, lw.ContainerLogs(containerName), amount)
+
+	t.Cleanup(func() {
+		if err := lw.Shutdown(ctx); err != nil {
+			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+		}
+		if err := container.Terminate(ctx); err != nil {
+			t.Fatalf("failed to terminate container: %s", err.Error())
+		}
+ }) +} + +type MockedLogProducingContainer struct { + name string + id string + isRunning bool + consumer testcontainers.LogConsumer + startError error + startSleep time.Duration + // acceptsLogs bool + stopError error + errorChannelError error + startCounter int + messages []string + logMutex sync.Mutex + errorCh chan error +} + +func (m *MockedLogProducingContainer) Name(ctx context.Context) (string, error) { + return m.name, nil +} + +func (m *MockedLogProducingContainer) FollowOutput(consumer testcontainers.LogConsumer) { + m.consumer = consumer +} + +func (m *MockedLogProducingContainer) StartLogProducer(ctx context.Context, timeout time.Duration) error { + m.startCounter++ + m.errorCh = make(chan error, 1) + + if m.startError != nil { + // m.acceptsLogs = false + return m.startError + } + + if m.startSleep > 0 { + time.Sleep(m.startSleep) + } + + // m.logMutex.Lock() + // defer m.logMutex.Unlock() + // m.acceptsLogs = true + + // store index of last processed log + // iterate over m.messages in a goroutine and accept new logs + go func() { + fmt.Println("starting log producer loop") + lastProcessedLogIndex := -1 + for { + time.Sleep(200 * time.Millisecond) + { + // m.lock("loop") + m.errorCh <- m.errorChannelError + if m.errorChannelError != nil { + fmt.Println("stopping log producer loop") + // m.unlock("loop") + return + } + // m.unlock("loop") + } + for i, msg := range m.messages { + time.Sleep(200 * time.Millisecond) + // fmt.Printf("lastProcessedLogIndex: %d, i: %d\n", lastProcessedLogIndex, i) + if i <= lastProcessedLogIndex { + fmt.Println("skipping log") + continue + } + lastProcessedLogIndex = i + fmt.Println("processing log") + m.consumer.Accept(testcontainers.Log{ + LogType: testcontainers.StdoutLog, + Content: []byte(msg), + }) + } + } + }() + + return nil +} + +func (m *MockedLogProducingContainer) StopLogProducer() error { + return m.stopError +} + +func (m *MockedLogProducingContainer) GetLogProducerErrorChannel() <-chan error { + return m.errorCh +} + +func (m *MockedLogProducingContainer) IsRunning() bool { + return m.isRunning +} + +func (m *MockedLogProducingContainer) GetContainerID() string { + return m.id +} + +func (m *MockedLogProducingContainer) SendLog(msg string) { + m.messages = append(m.messages, msg) + fmt.Println("new log sent") +} + +func (m *MockedLogProducingContainer) lock(msg string) { + m.logMutex.Lock() + fmt.Printf("lock acquired: %s\n", msg) +} + +func (m *MockedLogProducingContainer) unlock(msg string) { + m.logMutex.Unlock() + fmt.Printf("lock released: %s\n", msg) +} + +// make sure that before it stopped working it received at least 1 log +// and that when it's started again, then it removes old logs and then receives old ones again and then follow the new ones +func TestLogWatchConnectRetryMockContainer(t *testing.T) { + t.Parallel() + ctx := testcontext.Get(t) + uuid := uuid.NewString() + amount := 10 + interval := float64(1.12) + + mockedContainer := &MockedLogProducingContainer{ + name: fmt.Sprintf("%s-container-%s", t.Name(), uuid), + id: uuid, + // isRunning: true, + startError: nil, + stopError: nil, + errorChannelError: nil, + // acceptsLogs: true, + } + + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second)) + require.NoError(t, err) + + go func() { + time.Sleep(1 * time.Second) + { + // mockedContainer.lock("set error") + mockedContainer.startSleep = 1 * time.Second + mockedContainer.errorChannelError = errors.New("test error") + // mockedContainer.unlock("set error") + } + // 
mockedContainer.startSleep = 1 * time.Second
+		time.Sleep(1 * time.Second)
+		{
+			// mockedContainer.lock("unset error")
+			mockedContainer.errorChannelError = nil
+			// mockedContainer.unlock("unset errors")
+		}
+	}()
+
+	go func() {
+		// time.Sleep(500 * time.Millisecond)
+		for i := 0; i < amount; i++ {
+			mockedContainer.SendLog(fmt.Sprintf("message-%d", i))
+			time.Sleep(time.Duration(time.Duration(interval) * time.Second))
+		}
+	}()
+
+	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
+	require.NoError(t, err)
+
+	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
+	lw.PrintAll()
+
+	require.Len(t, lw.ContainerLogs(mockedContainer.name), 10)
+	require.Equal(t, 2, mockedContainer.startCounter)
+
+	t.Cleanup(func() {
+		if err := lw.Shutdown(ctx); err != nil {
+			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+		}
+	})
+}
+
+// as above, but with 3 start attempts: on the 2nd one the producer is still running, on the 3rd it works
+// it should still have all 10 logs in the end
+func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) {
+	t.Parallel()
+	ctx := testcontext.Get(t)
+	uuid := uuid.NewString()
+	amount := 10
+	interval := float64(1)
+
+	mockedContainer := &MockedLogProducingContainer{
+		name:              fmt.Sprintf("%s-container-%s", t.Name(), uuid),
+		id:                uuid,
+		isRunning:         false,
+		startError:        nil,
+		stopError:         nil,
+		errorChannelError: nil,
+	}
+
+	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second))
+	require.NoError(t, err)
+
+	go func() {
+		for i := 0; i < amount; i++ {
+			mockedContainer.SendLog(fmt.Sprintf("message-%d", i))
+			time.Sleep(time.Duration(time.Duration(interval) * time.Second))
+		}
+		time.Sleep(500 * time.Millisecond)
+	}()
+
+	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
+	require.NoError(t, err)
+
+	go func() {
+		mockedContainer.startSleep = 1 * time.Second
+		mockedContainer.isRunning = true
+		mockedContainer.errorChannelError = errors.New("read error")
+		mockedContainer.startError = errors.New("still running")
+		time.Sleep(2 * time.Second)
+		mockedContainer.startError = nil
+		mockedContainer.errorChannelError = nil
+	}()
+
+	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
+	lw.PrintAll()
+
+	require.Len(t, lw.ContainerLogs(mockedContainer.name), 10)
+	require.Equal(t, 3, mockedContainer.startCounter)
+
+	t.Cleanup(func() {
+		if err := lw.Shutdown(ctx); err != nil {
+			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+		}
+	})
+}
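A side note on the mocked container used in these tests: the test goroutines mutate errorChannelError and startError while the producer loop reads them, which go test -race would flag. A possible guard, sketched here under that assumption (this is not what the patch does; it requires the "sync" import), is to keep the injected error behind a small mutex-protected holder:

// injectedError is a hypothetical helper: it serializes access to the error
// that tests inject into the mocked container, keeping the race detector quiet.
type injectedError struct {
	mu  sync.Mutex
	err error
}

func (i *injectedError) Set(err error) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.err = err
}

func (i *injectedError) Get() error {
	i.mu.Lock()
	defer i.mu.Unlock()
	return i.err
}

The mock's producer loop would then call Get() instead of reading the field directly, and the tests would call Set(...) to inject or clear the error.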
diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go
index 69bcb176b..5b04b976b 100644
--- a/logwatch/logwatch_user_test.go
+++ b/logwatch/logwatch_user_test.go
@@ -1,6 +1,7 @@
 package logwatch_test

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"os"
@@ -65,7 +66,6 @@ func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch) error {

 /* That's how you use it */
 func TestExampleUserInteraction(t *testing.T) {
-	os.Setenv("LOGWATCH_LOG_TARGETS", "")
 	t.Run("sync API, block, receive one message", func(t *testing.T) {
 		ctx := context.Background()
 		testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}}
 		d, err := NewDeployment(ctx, testData)
 		// nolint
@@ -114,3 +114,140 @@ func TestExampleUserInteraction(t *testing.T) {
 		require.Equal(t, testData.repeat*len(testData.streams), notifications)
 	})
 }
+
+var (
+	A = []byte("A\n")
+	B = []byte("B\n")
+	C = []byte("C\n")
+)
+
+func TestFileLoggingTarget(t *testing.T) {
+	ctx := context.Background()
+	testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}}
+	d, err := NewDeployment(ctx, testData)
+	// nolint
+	defer d.Shutdown(ctx)
+	require.NoError(t, err)
+	lw, err := logwatch.NewLogWatch(
+		t,
+		nil,
+		logwatch.WithLogTarget(logwatch.File),
+	)
+	require.NoError(t, err)
+	err = d.ConnectLogs(lw)
+	require.NoError(t, err)
+
+	time.Sleep(2 * time.Second)
+
+	var logFileLocation string
+
+	bufferWriter := func(_ string, _ string, location interface{}) error {
+		logFileLocation = location.(string)
+		return nil
+	}
+
+	lw.SaveLogTargetsLocations(bufferWriter)
+
+	content, err := os.ReadFile(logFileLocation + "/container-0.log")
+	require.NoError(t, err)
+
+	require.True(t, bytes.Contains(content, A), "A should be present in log file")
+	require.True(t, bytes.Contains(content, B), "B should be present in log file")
+	require.True(t, bytes.Contains(content, C), "C should be present in log file")
+}
+
+type MockedLogHandler struct {
+	logs   []logwatch.LogContent
+	Target logwatch.LogTarget
+}
+
+func (m *MockedLogHandler) Handle(consumer *logwatch.ContainerLogConsumer, content logwatch.LogContent) error {
+	m.logs = append(m.logs, content)
+	return nil
+}
+
+func (m *MockedLogHandler) GetLogLocation(consumers map[string]*logwatch.ContainerLogConsumer) (string, error) {
+	return "", nil
+}
+
+func (m *MockedLogHandler) GetTarget() logwatch.LogTarget {
+	return m.Target
+}
+
+func TestMultipleMockedLoggingTargets(t *testing.T) {
+	ctx := context.Background()
+	testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}}
+	d, err := NewDeployment(ctx, testData)
+	// nolint
+	defer d.Shutdown(ctx)
+	require.NoError(t, err)
+	mockedFileHandler := &MockedLogHandler{Target: logwatch.File}
+	mockedLokiHandler := &MockedLogHandler{Target: logwatch.Loki}
+	lw, err := logwatch.NewLogWatch(
+		t,
+		nil,
+		logwatch.WithCustomLogHandler(logwatch.File, mockedFileHandler),
+		logwatch.WithCustomLogHandler(logwatch.Loki, mockedLokiHandler),
+		logwatch.WithLogTarget(logwatch.Loki),
+		logwatch.WithLogTarget(logwatch.File),
+	)
+	require.NoError(t, err)
+	err = d.ConnectLogs(lw)
+	require.NoError(t, err)
+
+	time.Sleep(2 * time.Second)
+
+	assertMockedHandlerHasLogs(t, mockedFileHandler)
+	assertMockedHandlerHasLogs(t, mockedLokiHandler)
+}
+
+func TestOneMockedLoggingTarget(t *testing.T) {
+	ctx := context.Background()
+	testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}}
+	d, err := NewDeployment(ctx, testData)
+	// nolint
+	defer d.Shutdown(ctx)
+	require.NoError(t, err)
+	mockedLokiHandler := &MockedLogHandler{Target: logwatch.Loki}
+	lw, err := logwatch.NewLogWatch(
+		t,
+		nil,
+		logwatch.WithCustomLogHandler(logwatch.Loki, mockedLokiHandler),
+		logwatch.WithLogTarget(logwatch.Loki),
+	)
+	require.NoError(t, err)
+	err = d.ConnectLogs(lw)
+	require.NoError(t, err)
+
+	time.Sleep(2 * time.Second)
+
+	assertMockedHandlerHasLogs(t, mockedLokiHandler)
+}
+
+func assertMockedHandlerHasLogs(t *testing.T, handler *MockedLogHandler) {
+	matches := make(map[string]int)
+	matches["A"] = 0
+	matches["B"] = 0
+	matches["C"] = 0
+
+	for _, log := range handler.logs {
+		require.Equal(t, t.Name(), log.TestName)
+		require.Equal(t, "container-0", log.ContainerName)
+
+		if bytes.Equal(log.Content, A) {
+			matches["A"]++
+		}
+
+		if bytes.Equal(log.Content, B) {
+			matches["B"]++
+		}
+
+		if bytes.Equal(log.Content, C) {
+			matches["C"]++
+		}
+	}
+
+	require.Greater(t, matches["A"], 0, "A should be matched at least once in handler for %s", handler.Target)
+	require.Greater(t, matches["B"], 0, "B should be matched at least once in handler for %s", handler.Target)
+	require.Greater(t, matches["C"], 0, "C should be matched at least once in handler for %s", handler.Target)
+}
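The tests above now enable targets programmatically with WithLogTarget instead of mutating LOGWATCH_LOG_TARGETS. For reference, env-driven selection could look roughly like the sketch below. This is a hypothetical simplification: the real logic lives in getLogTargetsFromEnv, whose exact target names and LogTarget mapping are not shown in this patch, and it assumes fmt, os and strings are imported:

// targetsFromEnv sketches how LOGWATCH_LOG_TARGETS="loki,file" could be parsed;
// it returns raw names here for brevity instead of LogTarget values.
func targetsFromEnv() ([]string, error) {
	raw := strings.TrimSpace(os.Getenv("LOGWATCH_LOG_TARGETS"))
	if raw == "" {
		return nil, nil // nothing enabled; LogWatch warns and persists nothing
	}
	var targets []string
	for _, part := range strings.Split(raw, ",") {
		name := strings.ToLower(strings.TrimSpace(part))
		switch name {
		case "loki", "file": // assumed names, for illustration only
			targets = append(targets, name)
		default:
			return nil, fmt.Errorf("unknown log target: %s", name)
		}
	}
	return targets, nil
}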
From 4f5e13f23bb7d7223908c5360ee0138ee8d95c7c Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Thu, 30 Nov 2023 11:51:52 +0100
Subject: [PATCH 10/40] working tests for logwatch retries

---
 logwatch/__debug_bin3072951826 |   0
 logwatch/logwatch.go           |  35 ++--
 logwatch/logwatch_test.go      | 270 ++++++++++++++++++++++---------
 3 files changed, 216 insertions(+), 89 deletions(-)
 delete mode 100644 logwatch/__debug_bin3072951826

diff --git a/logwatch/__debug_bin3072951826 b/logwatch/__debug_bin3072951826
deleted file mode 100644
index e69de29bb..000000000
diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go
index b4644a0b5..aebb040b4 100644
--- a/logwatch/logwatch.go
+++ b/logwatch/logwatch.go
@@ -217,12 +217,11 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC
 	m.consumers[name] = cons
 	m.containers = append(m.containers, container)
 	container.FollowOutput(cons)
-
 	err = container.StartLogProducer(ctx, m.logProducerTimeout)

 	go func(done chan struct{}, timeout time.Duration, retryLimit int) {
-		defer m.log.Info().Str("Container name", name).Msg("Log listener stopped")
-		currentAttempt := 0
+		defer m.log.Info().Str("Container name", name).Msg("Disconnected container logs")
+		currentAttempt := 1

 		var shouldRetry = func() bool {
 			if retryLimit == -1 {
 				return true
@@ -239,11 +238,11 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC

 		for {
 			select {
-			case err := <-container.GetLogProducerErrorChannel():
-				if err != nil {
+			case logErr := <-container.GetLogProducerErrorChannel():
+				if logErr != nil {
 					m.log.Error().
-						Str("Container name", name).
-						Err(err).
+						Err(logErr).
+						Str("Container name", name).
 						Msg("Log producer errored")
 					if shouldRetry() {
 						backoff := retries.Fibonacci(currentAttempt)
@@ -253,25 +252,28 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC
 							Str("Container name", name).
 							Str("Timeout", timeout.String()).
 							Msgf("Retrying connection and listening to container logs. Attempt %d/%d", currentAttempt, retryLimit)
-						// we will request all logs from the start, when we start log producer, so we need to remove ones already saved to avoid duplicates
-						// in the unlikely case that log producer fails to start we will copy the messages, so that at least some logs are salvaged
+						// when log producer starts again it will request all logs again, so we need to remove ones already saved by log watch to avoid duplicates
+						// in the unlikely case that log producer fails to start we will copy the messages received so far, so that at least some logs are salvaged
 						messagesCopy := append([]string{}, m.consumers[name].Messages...)
 						m.consumers[name].Messages = make([]string, 0)
 						m.log.Warn().Msgf("Consumer messages: %d", len(m.consumers[name].Messages))
-						startTime := time.Now()
-						timedout := false
+
+						failedToStart := false
 						for container.StartLogProducer(ctx, timeout) != nil {
-							if time.Since(startTime) >= 5*time.Second {
-								timedout = true
+							if !shouldRetry() {
+								failedToStart = true
 								break
 							}
-							m.log.Info().Msg("Waiting for log producer to stop")
-							time.Sleep(500 * time.Millisecond)
+							m.log.Info().
+								Str("Container name", name).
+								Msg("Waiting for log producer to stop before restarting it")
+							time.Sleep(1 * time.Second)
 						}
-						if timedout {
+						if failedToStart {
 							m.log.Error().
 								Err(err).
-								Msg("Previously running log producer couldn't be stopped. 
Won't try again") + Str("Container name", name). + Msg("Previously running log producer couldn't be stopped. Used all retry attempts. Won't try again") m.consumers[name].Messages = messagesCopy return } @@ -419,7 +421,6 @@ func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { g.lw.acceptMutex.Lock() defer g.lw.acceptMutex.Unlock() - g.lw.log.Info().Msgf("Received log message: %s", string(l.Content)) g.Messages = append(g.Messages, string(l.Content)) matches := g.FindMatch(l) for i := 0; i < matches; i++ { diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index ab564a9d1..37e078f9c 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -6,7 +6,6 @@ import ( "reflect" "regexp" "strconv" - "sync" "testing" "time" @@ -78,7 +77,7 @@ func startTestContainer(ctx context.Context, containerName string, msg string, a } else { cmd = []string{"bash", "-c", fmt.Sprintf( - "for i in {1..%d}; do sleep %.2f; echo \"%s-$i\"; done; while true; do sleep 1; done", + "for i in {1..%d}; do sleep %.2f; echo '%s'; done; while true; do sleep 1; done", amount, intervalSeconds, msg, @@ -196,8 +195,9 @@ func TestLogWatchDocker(t *testing.T) { } t.Logf("notifications: %v", spew.Sdump(notifications)) t.Logf("expectations: %v", spew.Sdump(tc.expectedNotifications)) + if !reflect.DeepEqual(tc.expectedNotifications, notifications) { - t.Fatalf("expected: %v, got: %v", tc.expectedNotifications, notifications) + t.Fatalf("expected logs: %v, got: %v", tc.expectedNotifications, notifications) } } @@ -261,19 +261,18 @@ func TestLogWatchConnectWithDelayDocker(t *testing.T) { } type MockedLogProducingContainer struct { - name string - id string - isRunning bool - consumer testcontainers.LogConsumer - startError error - startSleep time.Duration - // acceptsLogs bool + name string + id string + isRunning bool + consumer testcontainers.LogConsumer + startError error + startSleep time.Duration stopError error errorChannelError error startCounter int messages []string - logMutex sync.Mutex - errorCh chan error + // logMutex sync.Mutex + errorCh chan error } func (m *MockedLogProducingContainer) Name(ctx context.Context) (string, error) { @@ -289,7 +288,6 @@ func (m *MockedLogProducingContainer) StartLogProducer(ctx context.Context, time m.errorCh = make(chan error, 1) if m.startError != nil { - // m.acceptsLogs = false return m.startError } @@ -297,14 +295,8 @@ func (m *MockedLogProducingContainer) StartLogProducer(ctx context.Context, time time.Sleep(m.startSleep) } - // m.logMutex.Lock() - // defer m.logMutex.Unlock() - // m.acceptsLogs = true - - // store index of last processed log - // iterate over m.messages in a goroutine and accept new logs go func() { - fmt.Println("starting log producer loop") + // fmt.Println("starting log producer loop") lastProcessedLogIndex := -1 for { time.Sleep(200 * time.Millisecond) @@ -312,21 +304,20 @@ func (m *MockedLogProducingContainer) StartLogProducer(ctx context.Context, time // m.lock("loop") m.errorCh <- m.errorChannelError if m.errorChannelError != nil { - fmt.Println("stopping log producer loop") + // fmt.Println("stopping log producer loop") // m.unlock("loop") return } // m.unlock("loop") } for i, msg := range m.messages { - time.Sleep(200 * time.Millisecond) - // fmt.Printf("lastProcessedLogIndex: %d, i: %d\n", lastProcessedLogIndex, i) + time.Sleep(50 * time.Millisecond) if i <= lastProcessedLogIndex { - fmt.Println("skipping log") + // fmt.Println("skipping 
log") continue } lastProcessedLogIndex = i - fmt.Println("processing log") + // fmt.Println("processing log") m.consumer.Accept(testcontainers.Log{ LogType: testcontainers.StdoutLog, Content: []byte(msg), @@ -356,22 +347,22 @@ func (m *MockedLogProducingContainer) GetContainerID() string { func (m *MockedLogProducingContainer) SendLog(msg string) { m.messages = append(m.messages, msg) - fmt.Println("new log sent") + // fmt.Println("new log sent") } -func (m *MockedLogProducingContainer) lock(msg string) { - m.logMutex.Lock() - fmt.Printf("lock acquired: %s\n", msg) -} +// func (m *MockedLogProducingContainer) lock(msg string) { +// m.logMutex.Lock() +// fmt.Printf("lock acquired: %s\n", msg) +// } -func (m *MockedLogProducingContainer) unlock(msg string) { - m.logMutex.Unlock() - fmt.Printf("lock released: %s\n", msg) -} +// func (m *MockedLogProducingContainer) unlock(msg string) { +// m.logMutex.Unlock() +// fmt.Printf("lock released: %s\n", msg) +// } -// make sure that before it stopped working it received at least 1 log -// and that when it's started again, then it removes old logs and then receives old ones again and then follow the new ones -func TestLogWatchConnectRetryMockContainer(t *testing.T) { +// secenario: log watch consumes a log, then the container returns an error, log watch reconnects +// and consumes logs again. log watch should not miss any logs nor consume any log twice +func TestLogWatchConnectRetryMockContainer_Once(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) uuid := uuid.NewString() @@ -379,51 +370,117 @@ func TestLogWatchConnectRetryMockContainer(t *testing.T) { interval := float64(1.12) mockedContainer := &MockedLogProducingContainer{ - name: fmt.Sprintf("%s-container-%s", t.Name(), uuid), - id: uuid, - // isRunning: true, + name: fmt.Sprintf("%s-container-%s", t.Name(), uuid), + id: uuid, + isRunning: true, startError: nil, stopError: nil, errorChannelError: nil, - // acceptsLogs: true, } lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second)) - require.NoError(t, err) + require.NoError(t, err, "log watch should be created") go func() { + // wait for 1 second, so that log watch has time to consume at least one log before it's stopped time.Sleep(1 * time.Second) - { - // mockedContainer.lock("set error") - mockedContainer.startSleep = 1 * time.Second - mockedContainer.errorChannelError = errors.New("test error") - // mockedContainer.unlock("set error") - } - // mockedContainer.startSleep = 1 * time.Second + mockedContainer.startSleep = 1 * time.Second + logsReceived := len(lw.ContainerLogs(mockedContainer.name)) + require.True(t, logsReceived > 0, "should have received at least 1 log before injecting error") + mockedContainer.errorChannelError = errors.New("failed to read logs") + + // clear the error after 1 second, so that log producer can resume log consumption time.Sleep(1 * time.Second) - { - // mockedContainer.lock("unset error") - mockedContainer.errorChannelError = nil - // mockedContainer.unlock("unset errors") + mockedContainer.errorChannelError = nil + }() + + logsSent := []string{} + go func() { + for i := 0; i < amount; i++ { + toSend := fmt.Sprintf("message-%d", i) + logsSent = append(logsSent, toSend) + mockedContainer.SendLog(toSend) + time.Sleep(time.Duration(time.Duration(interval) * time.Second)) } }() + err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name) + require.NoError(t, err, "log watch should connect to container") + + 
+
+// scenario: log watch consumes a log, then the container returns an error, log watch reconnects
+// and consumes logs again, then it happens again. log watch should not miss any logs nor consume any log twice
+func TestLogWatchConnectRetryMockContainer_Twice(t *testing.T) {
+	t.Parallel()
+	ctx := testcontext.Get(t)
+	uuid := uuid.NewString()
+	amount := 10
+	interval := float64(1.12)
+
+	mockedContainer := &MockedLogProducingContainer{
+		name:              fmt.Sprintf("%s-container-%s", t.Name(), uuid),
+		id:                uuid,
+		isRunning:         true,
+		startError:        nil,
+		stopError:         nil,
+		errorChannelError: nil,
+	}
+
+	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second))
+	require.NoError(t, err, "log watch should be created")
+
+	go func() {
+		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		time.Sleep(1 * time.Second)
+		mockedContainer.startSleep = 1 * time.Second
+		require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0")
+		mockedContainer.errorChannelError = errors.New("failed to read logs")
+
+		// clear the error after 1 second, so that log producer can resume log consumption
+		time.Sleep(1 * time.Second)
+		mockedContainer.errorChannelError = nil
+
+		// wait for 3 seconds so that some logs are consumed before we inject error again
+		time.Sleep(3 * time.Second)
+		mockedContainer.startSleep = 1 * time.Second
+		require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0")
+		mockedContainer.errorChannelError = errors.New("failed to read logs")
+
+		// clear the error after 1 second, so that log producer can resume log consumption
+		time.Sleep(1 * time.Second)
+		mockedContainer.errorChannelError = nil
+	}()
+
+	logsSent := []string{}
 	go func() {
 		for i := 0; i < amount; i++ {
+			toSend := fmt.Sprintf("message-%d", i)
+			logsSent = append(logsSent, toSend)
+			mockedContainer.SendLog(toSend)
 			time.Sleep(time.Duration(time.Duration(interval) * time.Second))
 		}
 	}()

 	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
-	require.NoError(t, err)
+	require.NoError(t, err, "log watch should connect to container")

 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
 	lw.PrintAll()

-	require.Len(t, lw.ContainerLogs(mockedContainer.name), 10)
-	require.Equal(t, 2, mockedContainer.startCounter)
+	require.EqualValues(t, lw.ContainerLogs(mockedContainer.name), logsSent, "log watch should receive all logs")
+	require.Equal(t, 3, mockedContainer.startCounter, "log producer should be started three times")

 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
 			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
@@ -432,8 +489,8 @@ func TestLogWatchConnectRetryMockContainer(t *testing.T) {
 	})
 }

-// as above, but with 3 start attempts: on the 2nd one the producer is still running, on the 3rd it works
-// it should still have all 10 logs in the end
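These fixed sleeps make the tests run long and can flake on slow runners. One possible tightening, offered only as an alternative sketch and not what the patch does, is to poll for the expected state with testify's require.Eventually instead of sleeping a fixed amount (names such as lw, mockedContainer and amount refer to the surrounding test):

// Sketch: wait until all logs have arrived instead of sleeping a fixed duration.
require.Eventually(t, func() bool {
	return len(lw.ContainerLogs(mockedContainer.name)) == amount
}, 30*time.Second, 250*time.Millisecond, "expected all sent logs to be consumed")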
+// scenario: it consumes a log, then the container returns an error, but when log watch tries to reconnect the log producer
+// is still running, but finally it stops and log watch reconnects. log watch should not miss any logs nor consume any log twice
 func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
@@ -444,41 +501,110 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) {
 	mockedContainer := &MockedLogProducingContainer{
 		name:              fmt.Sprintf("%s-container-%s", t.Name(), uuid),
 		id:                uuid,
-		isRunning:         false,
+		isRunning:         true,
 		startError:        nil,
 		stopError:         nil,
 		errorChannelError: nil,
 	}

 	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second))
-	require.NoError(t, err)
+	require.NoError(t, err, "log watch should be created")
+
+	go func() {
+		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		time.Sleep(1 * time.Second)
+		mockedContainer.startSleep = 1 * time.Second
+		require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0")
+
+		// introduce read error, so that log producer stops
+		mockedContainer.errorChannelError = errors.New("failed to read logs")
+		// inject start error, that simulates log producer still running (e.g. closing connection to the container)
+		mockedContainer.startError = errors.New("still running")
+
+		// wait for one second before clearing errors, so that we retry to connect
+		time.Sleep(1 * time.Second)
+		mockedContainer.startError = nil
+		mockedContainer.errorChannelError = nil
+	}()
+
+	logsSent := []string{}
 	go func() {
 		for i := 0; i < amount; i++ {
-			mockedContainer.SendLog(fmt.Sprintf("message-%d", i))
+			toSend := fmt.Sprintf("message-%d", i)
+			logsSent = append(logsSent, toSend)
+			mockedContainer.SendLog(toSend)
 			time.Sleep(time.Duration(time.Duration(interval) * time.Second))
 		}
-		time.Sleep(500 * time.Millisecond)
 	}()

 	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
-	require.NoError(t, err)
+	require.NoError(t, err, "log watch should connect to container")
+
+	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
+	lw.PrintAll()
+
+	require.EqualValues(t, logsSent, lw.ContainerLogs(mockedContainer.name), "log watch should receive all logs")
+	require.Equal(t, 3, mockedContainer.startCounter, "log producer should be started three times")
+
+	t.Cleanup(func() {
+		if err := lw.Shutdown(ctx); err != nil {
+			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+		}
+	})
+}
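The next test exercises the salvage path from the retry loop: buffered messages are cleared before a restart (the producer will replay the log from the beginning), but restored if the producer can never be restarted. In isolation the idea looks roughly like this simplified sketch; restartAndSalvage is a hypothetical helper, not code from the patch:

// restartAndSalvage clears the buffer before restarting (the producer replays
// everything from the start, so keeping old entries would duplicate them), but
// restores the copy if the restart ultimately fails so those logs are not lost.
func restartAndSalvage(buffer *[]string, restart func() error) error {
	salvaged := append([]string{}, *buffer...) // keep a copy of what we already have
	*buffer = nil                              // avoid duplicates once the replay starts
	if err := restart(); err != nil {
		*buffer = salvaged // restart never succeeded: keep the salvaged logs
		return err
	}
	return nil
}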
+
+// scenario: it consumes a log, then the container returns an error, but when log watch tries to reconnect the log producer
+// is still running and log watch never reconnects. log watch should salvage logs that were consumed before the error was injected
+func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) {
+	t.Parallel()
+	ctx := testcontext.Get(t)
+	uuid := uuid.NewString()
+	amount := 10
+	interval := float64(1)
+
+	mockedContainer := &MockedLogProducingContainer{
+		name:              fmt.Sprintf("%s-container-%s", t.Name(), uuid),
+		id:                uuid,
+		isRunning:         true,
+		startError:        nil,
+		stopError:         nil,
+		errorChannelError: nil,
+	}
+
+	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerTimeoutRetryLimit(7))
+	require.NoError(t, err, "log watch should be created")
+
+	go func() {
+		// wait for 6 seconds, so that log watch has time to consume several logs before it's stopped
+		time.Sleep(6 * time.Second)
+		mockedContainer.startSleep = 1 * time.Second
+		require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0")
+
+		// introduce read error, so that log producer stops
+		mockedContainer.errorChannelError = errors.New("failed to read logs")
+		// inject start error, that simulates log producer still running (e.g. closing connection to the container)
+		mockedContainer.startError = errors.New("still running")
+	}()
+
+	logsSent := []string{}
+	go func() {
+		for i := 0; i < amount; i++ {
+			toSend := fmt.Sprintf("message-%d", i)
+			logsSent = append(logsSent, toSend)
+			mockedContainer.SendLog(toSend)
+			time.Sleep(time.Duration(time.Duration(interval) * time.Second))
+		}
+	}()
+
+	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
+	require.NoError(t, err, "log watch should connect to container")
+
+	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
+	lw.PrintAll()
+
+	// it should still salvage 6 logs that were consumed before error was injected and restarting failed
+	require.EqualValues(t, logsSent[:6], lw.ContainerLogs(mockedContainer.name), "log watch should receive six logs")
+	require.Equal(t, 7, mockedContainer.startCounter, "log producer should be started seven times")
+
+	t.Cleanup(func() {
+		if err := lw.Shutdown(ctx); err != nil {
+			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+		}
+	})
+}

From 506c9bdb52358f600863e24673284a279ec0ca4a Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Thu, 30 Nov 2023 12:18:24 +0100
Subject: [PATCH 11/40] use my private testcontainers-go

---
 go.mod | 3 +--
 go.sum | 2 ++
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/go.mod b/go.mod
index 1294a48ee..831274507 100644
--- a/go.mod
+++ b/go.mod
@@ -54,8 +54,7 @@ require (
 replace (
 	// replicating the replace directive on cosmos SDK
 	github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
-	// github.com/testcontainers/testcontainers-go => github.com/Tofel/testcontainers-go v0.0.0-20231128125734-9878fec1b450
-	github.com/testcontainers/testcontainers-go => ../testcontainers-go
+	github.com/testcontainers/testcontainers-go => github.com/Tofel/testcontainers-go v0.0.0-20231130110817-e6fbf9498b56

 	k8s.io/api => k8s.io/api v0.25.11
 	k8s.io/apimachinery => k8s.io/apimachinery v0.25.11
diff --git a/go.sum b/go.sum
index 72a42fb0b..c7b44e4f8 100644
--- a/go.sum
+++ b/go.sum
@@ -440,6 +440,8 @@ 
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/Tofel/testcontainers-go v0.0.0-20231130110817-e6fbf9498b56 h1:HItfr1XKD/4xnsJE56m3uxnkMQ9lbg8xDnkf9qoZCH0= +github.com/Tofel/testcontainers-go v0.0.0-20231130110817-e6fbf9498b56/go.mod h1:ICriE9bLX5CLxL9OFQ2N+2N+f+803LNJ1utJb1+Inx0= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= From 92ceaee808470700f3ca5cfb1fde38d53ecbde4e Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 30 Nov 2023 12:25:46 +0100 Subject: [PATCH 12/40] remove debug prints --- .../ethereum_network_e4eccee4.json | 35 ------------------- logwatch/logwatch_test.go | 31 ++++------------ 2 files changed, 6 insertions(+), 60 deletions(-) delete mode 100644 .private_chains/ethereum_network_e4eccee4.json diff --git a/.private_chains/ethereum_network_e4eccee4.json b/.private_chains/ethereum_network_e4eccee4.json deleted file mode 100644 index 084e9a042..000000000 --- a/.private_chains/ethereum_network_e4eccee4.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "consensus_type": "pos", - "consensus_layer": "prysm", - "execution_layer": "geth", - "docker_network_names": [ - "network-8bf1e6e0-c548-4cd8-b450-24ce360ca1d4" - ], - "containers": [ - { - "container_name": "geth2-7b45d1d1", - "container_type": "geth2" - }, - { - "container_name": "prysm-beacon-chain-3f00fc81", - "container_type": "prysm-beacon" - }, - { - "container_name": "prysm-validator-d799b911", - "container_type": "prysm-validator" - } - ], - "wait_for_finalization": false, - "generated_data_host_dir": "/var/folders/q4/975jb0kx3bb2x_yy0_7mf2y00000gn/T/custom_config_data1373575570", - "val_keys_dir": "/var/folders/q4/975jb0kx3bb2x_yy0_7mf2y00000gn/T/val_keys1464778088", - "ethereum_chain_config": { - "slots_per_epoch": 6, - "seconds_per_slot": 2, - "genesis_delay": 15, - "validator_count": 8, - "chain_id": 1337, - "addresses_to_fund": [ - "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" - ] - } -} \ No newline at end of file diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index 37e078f9c..03d5b4063 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -271,8 +271,7 @@ type MockedLogProducingContainer struct { errorChannelError error startCounter int messages []string - // logMutex sync.Mutex - errorCh chan error + errorCh chan error } func (m *MockedLogProducingContainer) Name(ctx context.Context) (string, error) { @@ -296,28 +295,21 @@ func (m *MockedLogProducingContainer) StartLogProducer(ctx context.Context, time } go func() { - // fmt.Println("starting log producer loop") lastProcessedLogIndex := -1 for { time.Sleep(200 * time.Millisecond) - { - // m.lock("loop") - m.errorCh <- m.errorChannelError - if m.errorChannelError != nil { - // fmt.Println("stopping log producer loop") - // m.unlock("loop") - return - } - // m.unlock("loop") + + m.errorCh <- m.errorChannelError + if m.errorChannelError != nil { + return } + for i, msg := range m.messages { time.Sleep(50 * time.Millisecond) if i <= 
lastProcessedLogIndex {
-				// fmt.Println("skipping log")
 				continue
 			}
 			lastProcessedLogIndex = i
-			// fmt.Println("processing log")
 			m.consumer.Accept(testcontainers.Log{
 				LogType: testcontainers.StdoutLog,
 				Content: []byte(msg),
@@ -347,19 +339,8 @@ func (m *MockedLogProducingContainer) GetContainerID() string {
 
 func (m *MockedLogProducingContainer) SendLog(msg string) {
 	m.messages = append(m.messages, msg)
-	// fmt.Println("new log sent")
 }
 
-// func (m *MockedLogProducingContainer) lock(msg string) {
-// 	m.logMutex.Lock()
-// 	fmt.Printf("lock acquired: %s\n", msg)
-// }
-
-// func (m *MockedLogProducingContainer) unlock(msg string) {
-// 	m.logMutex.Unlock()
-// 	fmt.Printf("lock released: %s\n", msg)
-// }
-
 // scenario: log watch consumes a log, then the container returns an error, log watch reconnects
 // and consumes logs again. log watch should not miss any logs nor consume any log twice
 func TestLogWatchConnectRetryMockContainer_Once(t *testing.T) {

From b2d7a6aaa33c59049bef75bf84f33dda99f6aaa3 Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Thu, 30 Nov 2023 12:33:54 +0100
Subject: [PATCH 13/40] run logwatch tests

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index a10eaaab6..1ce6a9389 100644
--- a/Makefile
+++ b/Makefile
@@ -73,7 +73,7 @@ compile_contracts:
 	python3 ./utils/compile_contracts.py
 
 test_unit: install_gotestfmt
-	go test -json -cover -covermode=count -coverprofile=unit-test-coverage.out ./client ./gauntlet ./testreporters ./docker/test_env ./k8s/config 2>&1 | tee /tmp/gotest.log | gotestfmt
+	go test -json -cover -covermode=count -coverprofile=unit-test-coverage.out ./client ./gauntlet ./testreporters ./docker/test_env ./k8s/config ./logwatch 2>&1 | tee /tmp/gotest.log | gotestfmt
 
 #######################
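The next patch threads a run_id through LogWatch and its log target handlers, so that every log line pushed during a single run can be correlated later. The intended precedence, where an explicit RUN_ID env var always wins over a generated id, can be sketched as follows (hypothetical helper name; the patch implements this inline in setOrGenerateRunId, where the early return matters, since otherwise the generated id would overwrite the env value):

	func runIdFromEnvOrNew(testName string) string {
		if fromEnv := os.Getenv("RUN_ID"); fromEnv != "" {
			return fromEnv // an explicitly provided run id wins
		}
		// otherwise derive a fresh id from the test name and a short uuid
		return fmt.Sprintf("%s-%s", testName, uuid.NewString()[0:16])
	}
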
From 7dfd86e0c0bd95eeb6853f6c219a6d3770bdb6f7 Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Fri, 1 Dec 2023 14:49:05 +0100
Subject: [PATCH 14/40] run_id support, grafana dashboard url

---
 .gitignore                     |  2 +-
 logwatch/logwatch.go           | 32 +++++++++++++++++
 logwatch/logwatch_handlers.go  | 64 +++++++++++++++++++++++-----------
 logwatch/logwatch_helpers.go   | 40 ---------------------
 logwatch/logwatch_user_test.go | 13 +++++--
 testsummary/summary.go         | 12 +++++--
 testsummary/test_summary.json  | 12 -------
 7 files changed, 98 insertions(+), 77 deletions(-)
 delete mode 100644 logwatch/logwatch_helpers.go
 delete mode 100644 testsummary/test_summary.json

diff --git a/.gitignore b/.gitignore
index cec553630..b676e40df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,7 +56,7 @@ dist/
 **/remote_runner_config.yaml
 
 logs/
-test_summary.json
+.test_summary/
 
 env/cmd/chaos
 env/bin/
diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go
index aebb040b4..15fe7fe1b 100644
--- a/logwatch/logwatch.go
+++ b/logwatch/logwatch.go
@@ -2,6 +2,7 @@ package logwatch
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"regexp"
 	"strings"
@@ -9,12 +10,14 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/pkg/errors"
 	"github.com/rs/zerolog"
 	"github.com/smartcontractkit/wasp"
 	"github.com/testcontainers/testcontainers-go"
 
 	"github.com/smartcontractkit/chainlink-testing-framework/logging"
+	"github.com/smartcontractkit/chainlink-testing-framework/testsummary"
 	"github.com/smartcontractkit/chainlink-testing-framework/utils/retries"
 )
 
@@ -53,6 +56,7 @@ type LogWatch struct {
 	logProducerTimeout           time.Duration
 	logProducerTimeoutRetryLimit int // -1 for infinite retries
 	acceptMutex                  sync.Mutex
+	runId                        string
 }
 
 type LogContent struct {
@@ -91,6 +95,8 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ...
 		enabledLogTargets:            envLogTargets,
 	}
 
+	logWatch.setOrGenerateRunId()
+
 	for _, option := range options {
 		option(logWatch)
 	}
@@ -99,9 +105,29 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ...
 		return nil, err
 	}
 
+	for _, handler := range logWatch.logTargetHandlers {
+		handler.SetRunId(logWatch.runId)
+	}
+
+	l.Info().Str("Run_id", logWatch.runId).Msg("LogWatch initialized")
+
 	return logWatch, nil
 }
 
+func (m *LogWatch) setOrGenerateRunId() {
+	inOs := os.Getenv("RUN_ID")
+
+	if inOs != "" {
+		m.log.Info().Str("Run_id", inOs).Msg("Using run_id from env var")
+		m.runId = inOs
+		return
+	}
+
+	runId := fmt.Sprintf("%s-%s", m.testName, uuid.NewString()[0:16])
+	m.log.Info().Str("Run_id", runId).Msg("Generated run id")
+
+	m.runId = runId
+}
+
 func (m *LogWatch) validateLogTargets() error {
 	// check if all requested log targets are supported
 	for _, wantedTarget := range m.enabledLogTargets {
@@ -334,6 +360,12 @@ func (m *LogWatch) PrintLogTargetsLocations() {
 	})
 }
 
+func (m *LogWatch) SaveLogLocationInTestSummary() {
+	m.SaveLogTargetsLocations(func(testName string, name string, location interface{}) error {
+		return testsummary.AddEntry(testName, name, location)
+	})
+}
+
 func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) {
 	for _, handler := range m.logTargetHandlers {
 		name := string(handler.GetTarget())
diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go
index b9930a839..143e5706a 100644
--- a/logwatch/logwatch_handlers.go
+++ b/logwatch/logwatch_handlers.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 	"time"
 
 	"github.com/pkg/errors"
@@ -23,6 +24,8 @@ type HandleLogTarget interface {
 	Handle(*ContainerLogConsumer, LogContent) error
 	GetLogLocation(map[string]*ContainerLogConsumer) (string, error)
 	GetTarget() LogTarget
+	SetRunId(string)
+	GetRunId() string
 }
 
 func getDefaultLogHandlers() map[LogTarget]HandleLogTarget {
@@ -37,6 +40,7 @@ func getDefaultLogHandlers() map[LogTarget]HandleLogTarget {
 type FileLogHandler struct {
 	logFolder         string
 	shouldSkipLogging bool
+	runId             string
 }
 
 func (h *FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error {
@@ -76,7 +80,7 @@ func (h FileLogHandler) GetLogLocation(_ map[string]*ContainerLogConsumer) (stri
 
 func (h *FileLogHandler) getOrCreateLogFolder(testname string) (string, error) {
 	if h.logFolder == "" {
-		folder := fmt.Sprintf("./logs/%s-%s", testname, time.Now().Format("2006-01-02T15-04-05"))
+		folder := fmt.Sprintf("./logs/%s-%s-%s", testname, time.Now().Format("2006-01-02T15-04-05"), h.runId)
 		if err := os.MkdirAll(folder, os.ModePerm); err != nil {
 			return "", err
 		}
@@ -90,10 +94,19 @@ func (h FileLogHandler) GetTarget() LogTarget {
 	return File
 }
 
+func (h *FileLogHandler) SetRunId(executionId string) {
+	h.runId = executionId
+}
+
+func (h *FileLogHandler) GetRunId() string {
+	return h.runId
+}
+
 // streams logs to Loki
 type LokiLogHandler struct {
 	grafanaUrl        string
 	shouldSkipLogging bool
+	runId             string
 }
 
 func (h *LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error {
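The hunks below relabel Loki streams with container_id and run_id and rework GetLogLocation to assemble a direct dashboard link instead of a hand-built explore URL. The final URL is shaped roughly like this (host and variable values are invented; the dashboard path is the one hardcoded below):

	https://grafana.example.com/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs?orgId=1&var-run_id=TestMyFlow-1a2b3c4d5e6f7081&var-container_id=geth-0&var-container_id=node-1&from=1701430000000&to=1701430060000
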
"container_id": model.LabelValue(content.ContainerName), + "run_id": model.LabelValue(h.runId), }, time.Now(), string(content.Content)) return nil @@ -127,17 +141,28 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum return h.grafanaUrl, nil } - queries := make([]GrafanaExploreQuery, 0) + grafanaBaseUrl := os.Getenv("GRAFANA_URL") + if grafanaBaseUrl == "" { + return "", errors.New("GRAFANA_URL env var is not set") + } + + grafanaBaseUrl = strings.TrimSuffix(grafanaBaseUrl, "/") rangeFrom := time.Now() rangeTo := time.Now().Add(time.Minute) //just to make sure we get the last message + var sb strings.Builder + sb.WriteString(grafanaBaseUrl) + sb.WriteString("/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs?orgId=1&") + sb.WriteString(fmt.Sprintf("var-run_id=%s", h.runId)) + + if len(consumers) == 0 { + return "", errors.New("no Loki consumers found") + } + for _, c := range consumers { if c.hasLogTarget(Loki) { - queries = append(queries, GrafanaExploreQuery{ - refId: c.name, - container: c.name, - }) + sb.WriteString(fmt.Sprintf("&var-container_id=%s", c.name)) } // lets find the oldest log message to know when to start the range from @@ -161,17 +186,8 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum } } - if len(queries) == 0 { - return "", errors.New("no Loki consumers found") - } - - h.grafanaUrl = GrafanaExploreUrl{ - baseurl: os.Getenv("GRAFANA_URL"), - datasource: os.Getenv("GRAFANA_DATASOURCE"), - queries: queries, - rangeFrom: rangeFrom.UnixMilli(), - rangeTo: rangeTo.UnixMilli(), - }.getUrl() + sb.WriteString(fmt.Sprintf("&from=%d&to=%d", rangeFrom.UnixMilli(), rangeTo.UnixMilli())) + h.grafanaUrl = sb.String() return h.grafanaUrl, nil } @@ -179,3 +195,11 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum func (h LokiLogHandler) GetTarget() LogTarget { return Loki } + +func (h *LokiLogHandler) SetRunId(executionId string) { + h.runId = executionId +} + +func (h *LokiLogHandler) GetRunId() string { + return h.runId +} diff --git a/logwatch/logwatch_helpers.go b/logwatch/logwatch_helpers.go deleted file mode 100644 index a1a3de4d8..000000000 --- a/logwatch/logwatch_helpers.go +++ /dev/null @@ -1,40 +0,0 @@ -package logwatch - -import ( - "fmt" - "strings" -) - -type GrafanaExploreUrl struct { - baseurl string - datasource string - queries []GrafanaExploreQuery - rangeFrom int64 - rangeTo int64 -} - -type GrafanaExploreQuery struct { - refId string - container string -} - -func (g GrafanaExploreUrl) getUrl() string { - url := g.baseurl - - if strings.HasSuffix(url, "/") && len(url) > 0 { - url = url[:len(url)-1] - } - - url += "/explore?panes=" - url += "{\"_an\":{\"datasource\":\"" + g.datasource + "\",\"queries\":[" - for i, query := range g.queries { - url += "{\"refId\":\"" + query.refId + "\",\"expr\":\"{container=\\\"" + query.container + "\\\"}\",\"queryType\":\"range\",\"datasource\":{\"type\":\"loki\",\"uid\":\"" + g.datasource + "\"},\"editorMode\":\"builder\",\"hide\":false}" - if i < len(g.queries)-1 { - url += "," - } - } - - url += "],\"range\":{\"from\":\"" + fmt.Sprint(g.rangeFrom) + "\",\"to\":\"" + fmt.Sprint(g.rangeTo) + "\"}}}&schemaVersion=1&orgId=1" - - return url -} diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go index 5b04b976b..2b634436a 100644 --- a/logwatch/logwatch_user_test.go +++ b/logwatch/logwatch_user_test.go @@ -157,8 +157,9 @@ func TestFileLoggingTarget(t *testing.T) { } type MockedLogHandler struct { - logs 
[]logwatch.LogContent - Target logwatch.LogTarget + logs []logwatch.LogContent + Target logwatch.LogTarget + executionId string } func (m *MockedLogHandler) Handle(consumer *logwatch.ContainerLogConsumer, content logwatch.LogContent) error { @@ -174,6 +175,14 @@ func (m *MockedLogHandler) GetTarget() logwatch.LogTarget { return m.Target } +func (m *MockedLogHandler) SetRunId(executionId string) { + m.executionId = executionId +} + +func (m *MockedLogHandler) GetRunId() string { + return m.executionId +} + func TestMultipleMockedLoggingTargets(t *testing.T) { ctx := context.Background() testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} diff --git a/testsummary/summary.go b/testsummary/summary.go index ff58eb578..f8601dc89 100644 --- a/testsummary/summary.go +++ b/testsummary/summary.go @@ -2,6 +2,7 @@ package testsummary import ( "encoding/json" + "fmt" "io" "os" "sync" @@ -9,7 +10,10 @@ import ( "github.com/pkg/errors" ) -const SUMMARY_FILE = "test_summary.json" +var ( + SUMMARY_FOLDER = ".test_summary" + SUMMARY_FILE = fmt.Sprintf("%s/test_summary.json", SUMMARY_FOLDER) +) type SummaryKeys map[string][]KeyContent @@ -30,7 +34,11 @@ func AddEntry(testName, key string, value interface{}) error { } strValue := value.(string) - f, err := os.OpenFile(SUMMARY_FILE, os.O_CREATE|os.O_WRONLY, 0644) + if err := os.MkdirAll(SUMMARY_FOLDER, 0755); err != nil { + return err + } + + f, err := os.OpenFile(SUMMARY_FILE, os.O_CREATE|os.O_RDWR, 0644) if err != nil { return err } diff --git a/testsummary/test_summary.json b/testsummary/test_summary.json deleted file mode 100644 index 61a4268d7..000000000 --- a/testsummary/test_summary.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "grafana_url": [ - { - "test_name": "first test", - "value": "https://grafana_url.pl/1" - }, - { - "test_name": "second test", - "value": "https://grafana_url.pl/2" - } - ] -} From d4ed6fd637105773adcfdf03532fc550f31fc655 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Fri, 1 Dec 2023 19:13:05 +0100 Subject: [PATCH 15/40] buffer logs in temp file before pushing them to targets, so that we save logs only for failed/required tests --- logwatch/logwatch.go | 178 +++++++++++++++++++++++++++------ logwatch/logwatch_handlers.go | 2 +- logwatch/logwatch_user_test.go | 20 ++-- testsummary/summary.go | 22 +++- utils/runid/run_id.go | 17 ++++ 5 files changed, 201 insertions(+), 38 deletions(-) create mode 100644 utils/runid/run_id.go diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 15fe7fe1b..f66689dc9 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -2,7 +2,9 @@ package logwatch import ( "context" + "encoding/gob" "fmt" + "io" "os" "regexp" "strings" @@ -19,6 +21,7 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/testsummary" "github.com/smartcontractkit/chainlink-testing-framework/utils/retries" + "github.com/smartcontractkit/chainlink-testing-framework/utils/runid" ) const NO_TEST = "no_test" @@ -63,6 +66,7 @@ type LogContent struct { TestName string ContainerName string Content []byte + Time time.Time } type Option func(*LogWatch) @@ -93,10 +97,9 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... 
logProducerTimeout: time.Duration(10 * time.Second), logProducerTimeoutRetryLimit: 10, enabledLogTargets: envLogTargets, + runId: fmt.Sprintf("%s-%s", testName, runid.GetOrGenerateRunId()), } - logWatch.setOrGenerateRunId() - for _, option := range options { option(logWatch) } @@ -114,20 +117,6 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... return logWatch, nil } -func (m *LogWatch) setOrGenerateRunId() { - inOs := os.Getenv("RUN_ID") - - if inOs != "" { - m.log.Info().Str("Run_id", inOs).Msg("Using run_id from env var") - m.runId = inOs - } - - runId := fmt.Sprintf("%s-%s", m.testName, uuid.NewString()[0:16]) - m.log.Info().Str("Run_id", runId).Msg("Generated run id") - - m.runId = runId -} - func (m *LogWatch) validateLogTargets() error { // check if all requested log targets are supported for _, wantedTarget := range m.enabledLogTargets { @@ -230,9 +219,13 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC var cons *ContainerLogConsumer if prefix != "" { - cons = newContainerLogConsumer(m, name, prefix, enabledLogTargets...) + cons, err = newContainerLogConsumer(m, name, prefix, enabledLogTargets...) } else { - cons = newContainerLogConsumer(m, name, name, enabledLogTargets...) + cons, err = newContainerLogConsumer(m, name, name, enabledLogTargets...) + } + + if err != nil { + return err } m.log.Info(). @@ -427,6 +420,76 @@ func (m *LogWatch) PrintAll() { } } +// FlushLogsToTargets flushes all logs for all consumers (containers) to their targets +func (m *LogWatch) FlushLogsToTargets() error { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() + + m.log.Info().Msg("Flushing logs to targets") + for _, consumer := range m.consumers { + // nothing to do if no log targets are configured + if len(consumer.logTargets) == 0 { + continue + } + + if consumer.tempFile == nil { + return errors.Errorf("temp file is nil for container %s, this should never happen", consumer.name) + } + + // do not accept any new logs + consumer.isDone = true + // this was done on purpose, so that when we are done flushing all logs we can close the temp file and handle abrupt termination too + // nolint + defer consumer.tempFile.Close() + + _, err := consumer.tempFile.Seek(0, 0) + if err != nil { + return err + } + + decoder := gob.NewDecoder(consumer.tempFile) + counter := 0 + + //TODO handle in batches? + for { + var log LogContent + decodeErr := decoder.Decode(&log) + if decodeErr == nil { + counter++ + for _, logTarget := range consumer.logTargets { + if handler, ok := consumer.lw.logTargetHandlers[logTarget]; ok { + if err := handler.Handle(consumer, log); err != nil { + m.log.Error(). + Err(err). + Str("Container", consumer.name). + Str("log target", string(logTarget)). + Msg("Failed to handle log target") + } + } else { + m.log.Warn(). + Str("Container", consumer.name). + Str("log target", string(logTarget)). + Msg("No handler found for log target") + } + } + } else if errors.Is(decodeErr, io.EOF) { + m.log.Info(). + Int("Log count", counter). + Str("Container", consumer.name). + Msg("Finished flushing logs") + break + } else { + return decodeErr + } + } + } + + m.log.Info(). 
+ Msg("Flushed all logs to targets") + + return nil +} + // ContainerLogConsumer is a container log lines consumer type ContainerLogConsumer struct { name string @@ -434,48 +497,107 @@ type ContainerLogConsumer struct { logTargets []LogTarget lw *LogWatch Messages []string + tempFile *os.File + encoder *gob.Encoder + isDone bool + hasErrored bool } // newContainerLogConsumer creates new log consumer for a container that // - signal if log line matches the pattern // - push all lines to configured log targets -func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, logTargets ...LogTarget) *ContainerLogConsumer { - return &ContainerLogConsumer{ +func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, logTargets ...LogTarget) (*ContainerLogConsumer, error) { + consumer := &ContainerLogConsumer{ name: containerName, prefix: prefix, logTargets: logTargets, lw: lw, Messages: make([]string, 0), + isDone: false, + hasErrored: false, } + + if len(logTargets) == 0 { + return consumer, nil + } + + tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-%s-datafile.gob", containerName, uuid.NewString()[0:8])) + if err != nil { + return nil, err + } + + consumer.tempFile = tempFile + consumer.encoder = gob.NewEncoder(tempFile) + + return consumer, nil } -// Accept accepts the log message from particular container +// Accept accepts the log message from particular container and saves it to the temp gob file func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { g.lw.acceptMutex.Lock() defer g.lw.acceptMutex.Unlock() + + if g.hasErrored { + return + } + + if g.isDone { + g.lw.log.Error(). + Str("Test", g.lw.testName). + Str("Container", g.name). + Str("Log", string(l.Content)). + Msg("Consumer has finished, but you are still trying to accept logs. This should never happen") + return + } + g.Messages = append(g.Messages, string(l.Content)) matches := g.FindMatch(l) for i := 0; i < matches; i++ { g.lw.notifyTest <- &LogNotification{Container: g.name, Prefix: g.prefix, Log: string(l.Content)} } + // if no log targets are configured, we don't need to save the logs + if len(g.logTargets) == 0 { + return + } + + if g.tempFile == nil || g.encoder == nil { + g.hasErrored = true + g.lw.log.Error(). + Msg("temp file or encoder is nil, consumer cannot work, this should never happen") + return + } + content := LogContent{ TestName: g.lw.testName, ContainerName: g.name, Content: l.Content, + Time: time.Now(), } - for _, logTarget := range g.logTargets { - if handler, ok := g.lw.logTargetHandlers[logTarget]; ok { - if err := handler.Handle(g, content); err != nil { - g.lw.log.Error().Err(err).Msg("Failed to handle log target") - } - } else { - g.lw.log.Warn().Str("log target", string(logTarget)).Msg("No handler found for log target") + if err := g.streamLogToTempFile(content); err != nil { + g.lw.log.Error(). + Err(err). + Str("Container", g.name). + Msg("Failed to stream log to temp file") + g.hasErrored = true + err = g.tempFile.Close() + if err != nil { + g.lw.log.Error(). + Err(err). 
+ Msg("Failed to close temp file") } } } +func (g *ContainerLogConsumer) streamLogToTempFile(content LogContent) error { + if g.encoder == nil { + return errors.New("encoder is nil, this should never happen") + } + + return g.encoder.Encode(content) +} + // FindMatch check multiple regex patterns for the same string // can be checked with one regex, made for readability of user-facing API func (g *ContainerLogConsumer) FindMatch(l testcontainers.Log) int { diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go index 143e5706a..76143d212 100644 --- a/logwatch/logwatch_handlers.go +++ b/logwatch/logwatch_handlers.go @@ -131,7 +131,7 @@ func (h *LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) err "test": model.LabelValue(content.TestName), "container_id": model.LabelValue(content.ContainerName), "run_id": model.LabelValue(h.runId), - }, time.Now(), string(content.Content)) + }, content.Time, string(content.Content)) return nil } diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go index 2b634436a..8a1a6717f 100644 --- a/logwatch/logwatch_user_test.go +++ b/logwatch/logwatch_user_test.go @@ -133,9 +133,9 @@ func TestFileLoggingTarget(t *testing.T) { nil, logwatch.WithLogTarget(logwatch.File), ) - require.NoError(t, err) + require.NoError(t, err, "failed to create logwatch") err = d.ConnectLogs(lw) - require.NoError(t, err) + require.NoError(t, err, "failed to connect logs") time.Sleep(2 * time.Second) @@ -146,10 +146,12 @@ func TestFileLoggingTarget(t *testing.T) { return nil } + err = lw.FlushLogsToTargets() + require.NoError(t, err, "failed to flush logs to targets") lw.SaveLogTargetsLocations(bufferWriter) content, err := os.ReadFile(logFileLocation + "/container-0.log") - require.NoError(t, err) + require.NoError(t, err, "failed to read log file") require.True(t, bytes.Contains(content, A), "A should be present in log file") require.True(t, bytes.Contains(content, B), "B should be present in log file") @@ -200,11 +202,13 @@ func TestMultipleMockedLoggingTargets(t *testing.T) { logwatch.WithLogTarget(logwatch.Loki), logwatch.WithLogTarget(logwatch.File), ) - require.NoError(t, err) + require.NoError(t, err, "failed to create logwatch") err = d.ConnectLogs(lw) - require.NoError(t, err) + require.NoError(t, err, "failed to connect logs") time.Sleep(2 * time.Second) + err = lw.FlushLogsToTargets() + require.NoError(t, err, "failed to flush logs to targets") assertMockedHandlerHasLogs(t, mockedFileHandler) assertMockedHandlerHasLogs(t, mockedLokiHanlder) @@ -224,11 +228,13 @@ func TestOneMockedLoggingTarget(t *testing.T) { logwatch.WithCustomLogHandler(logwatch.Loki, mockedLokiHanlder), logwatch.WithLogTarget(logwatch.Loki), ) - require.NoError(t, err) + require.NoError(t, err, "failed to create logwatch") err = d.ConnectLogs(lw) - require.NoError(t, err) + require.NoError(t, err, "failed to connect logs") time.Sleep(2 * time.Second) + err = lw.FlushLogsToTargets() + require.NoError(t, err, "failed to flush logs to targets") assertMockedHandlerHasLogs(t, mockedLokiHanlder) } diff --git a/testsummary/summary.go b/testsummary/summary.go index f8601dc89..49d4986cf 100644 --- a/testsummary/summary.go +++ b/testsummary/summary.go @@ -5,14 +5,18 @@ import ( "fmt" "io" "os" + "strings" "sync" + "time" "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-testing-framework/utils/runid" ) var ( SUMMARY_FOLDER = ".test_summary" - SUMMARY_FILE = fmt.Sprintf("%s/test_summary.json", SUMMARY_FOLDER) + SUMMARY_FILE = 
fmt.Sprintf("%s/test_summary-%s-%s.json", SUMMARY_FOLDER, time.Now().Format("2006-01-02T15-04-05"), runid.GetOrGenerateRunId()) ) type SummaryKeys map[string][]KeyContent @@ -52,7 +56,11 @@ func AddEntry(testName, key string, value interface{}) error { var entries SummaryKeys err = json.Unmarshal(fc, &entries) if err != nil { - return err + if !strings.Contains(err.Error(), "unexpected end of JSON input") { + return err + } + + entries = make(SummaryKeys) } if entry, ok := entries[key]; ok { @@ -74,6 +82,16 @@ func AddEntry(testName, key string, value interface{}) error { entries[key] = []KeyContent{{TestName: testName, Value: strValue}} } + _, err = f.Seek(0, 0) + if err != nil { + return err + } + + err = f.Truncate(0) + if err != nil { + return err + } + encoder := json.NewEncoder(f) err = encoder.Encode(entries) if err != nil { diff --git a/utils/runid/run_id.go b/utils/runid/run_id.go new file mode 100644 index 000000000..fbef88948 --- /dev/null +++ b/utils/runid/run_id.go @@ -0,0 +1,17 @@ +package runid + +import ( + "os" + + "github.com/google/uuid" +) + +func GetOrGenerateRunId() string { + inOs := os.Getenv("RUN_ID") + + if inOs != "" { + return inOs + } + + return uuid.NewString()[0:16] +} From cfe4a6224e9b0a15a9f2f7108a69f66d19721b7b Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 11:49:28 -0300 Subject: [PATCH 16/40] =?UTF-8?q?share=20RUN=5FID=20between=20tests,=20whe?= =?UTF-8?q?n=20run=20on=20local=C2=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- logwatch/logwatch.go | 13 ++++++++++--- testsummary/summary.go | 11 +++++++++-- utils/runid/run_id.go | 33 ++++++++++++++++++++++++++++++--- 3 files changed, 49 insertions(+), 8 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index f66689dc9..16ab4bde4 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -86,6 +86,11 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... return nil, err } + runId, err := runid.GetOrGenerateRunId() + if err != nil { + return nil, err + } + logWatch := &LogWatch{ testName: testName, log: l, @@ -97,7 +102,7 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... logProducerTimeout: time.Duration(10 * time.Second), logProducerTimeoutRetryLimit: 10, enabledLogTargets: envLogTargets, - runId: fmt.Sprintf("%s-%s", testName, runid.GetOrGenerateRunId()), + runId: runId, } for _, option := range options { @@ -451,6 +456,7 @@ func (m *LogWatch) FlushLogsToTargets() error { counter := 0 //TODO handle in batches? + LOG_LOOP: for { var log LogContent decodeErr := decoder.Decode(&log) @@ -463,13 +469,14 @@ func (m *LogWatch) FlushLogsToTargets() error { Err(err). Str("Container", consumer.name). Str("log target", string(logTarget)). - Msg("Failed to handle log target") + Msg("Failed to handle log target. Aborting") + break LOG_LOOP } } else { m.log.Warn(). Str("Container", consumer.name). Str("log target", string(logTarget)). - Msg("No handler found for log target") + Msg("No handler found for log target. 
Aborting") } } } else if errors.Is(decodeErr, io.EOF) { diff --git a/testsummary/summary.go b/testsummary/summary.go index 49d4986cf..a0a6ace89 100644 --- a/testsummary/summary.go +++ b/testsummary/summary.go @@ -16,7 +16,8 @@ import ( var ( SUMMARY_FOLDER = ".test_summary" - SUMMARY_FILE = fmt.Sprintf("%s/test_summary-%s-%s.json", SUMMARY_FOLDER, time.Now().Format("2006-01-02T15-04-05"), runid.GetOrGenerateRunId()) + SUMMARY_FILE string + mu sync.Mutex ) type SummaryKeys map[string][]KeyContent @@ -26,7 +27,13 @@ type KeyContent struct { Value string `json:"value"` } -var mu sync.Mutex +func init() { + runId, err := runid.GetOrGenerateRunId() + if err != nil { + panic(err) + } + SUMMARY_FILE = fmt.Sprintf("%s/test_summary-%s-%s.json", SUMMARY_FOLDER, time.Now().Format("2006-01-02T15-04-05"), runId) +} // TODO in future allow value to be also []string or map[string]string? func AddEntry(testName, key string, value interface{}) error { diff --git a/utils/runid/run_id.go b/utils/runid/run_id.go index fbef88948..093589cbb 100644 --- a/utils/runid/run_id.go +++ b/utils/runid/run_id.go @@ -1,17 +1,44 @@ package runid import ( + "bufio" "os" "github.com/google/uuid" ) -func GetOrGenerateRunId() string { +func GetOrGenerateRunId() (string, error) { inOs := os.Getenv("RUN_ID") if inOs != "" { - return inOs + return inOs, nil } - return uuid.NewString()[0:16] + file, err := os.OpenFile(".run.id", os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return "", err + } + defer file.Close() + scanner := bufio.NewScanner(file) + var runId string + + for scanner.Scan() { + runId = scanner.Text() + } + + if err := scanner.Err(); err != nil { + return "", err + } + + if runId != "" { + return runId, nil + } + + runId = uuid.NewString() + + if _, err := file.WriteString(runId); err != nil { + return "", err + } + + return runId, nil } From 48b946706522fd8a3d29e1ea8c048d92ec0698bc Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 11:52:22 -0300 Subject: [PATCH 17/40] git ignore .run-id --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index b676e40df..1a8d33630 100644 --- a/.gitignore +++ b/.gitignore @@ -57,6 +57,7 @@ dist/ logs/ .test_summary/ +.run.id env/cmd/chaos env/bin/ From 1f1ed08f3234fd65cdb12fa7837534c4f091976b Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 15:38:40 -0300 Subject: [PATCH 18/40] remove pattern matching and default in-memory message storing --- logwatch/logwatch.go | 268 ++++++++++++++++++--------------- logwatch/logwatch_handlers.go | 51 ++++++- logwatch/logwatch_test.go | 128 ++++++---------- logwatch/logwatch_user_test.go | 51 ------- 4 files changed, 246 insertions(+), 252 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 16ab4bde4..107b78358 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -49,8 +49,6 @@ type LogWatch struct { testName string log zerolog.Logger loki *wasp.LokiClient - patterns map[string][]*regexp.Regexp - notifyTest chan *LogNotification containers []LogProducingContainer consumers map[string]*ContainerLogConsumer logTargetHandlers map[LogTarget]HandleLogTarget @@ -94,8 +92,6 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... 
logWatch := &LogWatch{
 		testName:          testName,
 		log:               l,
-		patterns:          patterns,
-		notifyTest:        make(chan *LogNotification, 10000),
 		consumers:         make(map[string]*ContainerLogConsumer, 0),
 		logTargetHandlers: getDefaultLogHandlers(),
 		logListeningDone:  make(chan struct{}, 1),
@@ -184,30 +180,6 @@ func WithLogProducerTimeoutRetryLimit(retryLimit int) Option {
 	}
 }
 
-// Listen listen for the next notification
-func (m *LogWatch) Listen() *LogNotification {
-	msg := <-m.notifyTest
-	m.log.Warn().
-		Str("Container", msg.Container).
-		Str("Line", msg.Log).
-		Msg("Received notification from container")
-	return msg
-}
-
-// OnMatch calling your testing hook on first match
-func (m *LogWatch) OnMatch(f func(ln *LogNotification)) {
-	go func() {
-		for {
-			msg := <-m.notifyTest
-			m.log.Warn().
-				Str("Container", msg.Container).
-				Str("Line", msg.Log).
-				Msg("Received notification from container")
-			f(msg)
-		}
-	}()
-}
-
 // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer
 func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingContainer, prefix string) error {
 	name, err := container.Name(ctx)
@@ -276,13 +248,19 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC
 							Str("Container name", name).
 							Str("Timeout", timeout.String()).
 							Msgf("Retrying connection and listening to container logs. Attempt %d/%d", currentAttempt, retryLimit)
-						// when log producer starts again it will request all logs again, so we need to remove ones already saved by log watch to avoid duplicates
-						// in the unlikely case that log producer fails to start we will copy the messages received so far, so that at least some logs are salvaged
-						messagesCopy := append([]string{}, m.consumers[name].Messages...)
-						m.consumers[name].Messages = make([]string, 0)
-						m.log.Warn().Msgf("Consumer messages: %d", len(m.consumers[name].Messages))
 						failedToStart := false
+						//TODO if there are many failures here we could save the file and restore its content if we fail to
+						//create a new temp file
+						resetErr := cons.ResetTempFile()
+						if resetErr != nil {
+							m.log.Error().
+								Err(resetErr).
+								Str("Container name", name).
+								Msg("Failed to reset temp file. Stopping logging")
+
+							return
+						}
 						for container.StartLogProducer(ctx, timeout) != nil {
 							if !shouldRetry() {
 								failedToStart = true
@@ -298,7 +276,6 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC
 								Err(err).
 								Str("Container name", name).
 								Msg("Previously running log producer couldn't be stopped. Used all retry attempts. Won't try again")
-							m.consumers[name].Messages = messagesCopy
 							return
 						}
 						m.log.Info().
@@ -336,7 +313,11 @@ func (m *LogWatch) Shutdown(context context.Context) error {
 					Str("Name", name).
Msg("Failed to disconnect container") - err = errors.Wrap(singleErr, "failed to disconnect container") + if err == nil { + err = singleErr + } else { + err = errors.Wrap(err, singleErr.Error()) + } } } @@ -389,48 +370,44 @@ func (m *LogWatch) DisconnectContainer(container LogProducingContainer) error { return nil } +var noOpConsumerFn = func(consumer *ContainerLogConsumer) error { + return nil +} + // ContainerLogs return all logs for the particular container -func (m *LogWatch) ContainerLogs(name string) []string { - m.acceptMutex.Lock() - defer m.acceptMutex.Unlock() - if _, ok := m.consumers[name]; !ok { - return []string{} +func (m *LogWatch) ContainerLogs(name string) ([]string, error) { + logs := []string{} + var getLogsFn = func(consumer *ContainerLogConsumer, log LogContent) error { + if consumer.name == name { + logs = append(logs, string(log.Content)) + } + return nil } - return m.consumers[name].Messages -} - -// AllLogs returns all logs for all containers -func (m *LogWatch) AllLogs() []string { - m.acceptMutex.Lock() - defer m.acceptMutex.Unlock() - logs := make([]string, 0) - for _, l := range m.consumers { - logs = append(logs, l.Messages...) + err := m.getAllLogsAndExecute(noOpConsumerFn, getLogsFn, noOpConsumerFn) + if err != nil { + return []string{}, err } - return logs + + return logs, err } -// PrintAll prints all logs for all containers connected -func (m *LogWatch) PrintAll() { +func (m *LogWatch) getAllLogsAndExecute(preExecuteFn func(consumer *ContainerLogConsumer) error, executeFn func(consumer *ContainerLogConsumer, log LogContent) error, cleanUpFn func(consumer *ContainerLogConsumer) error) error { m.acceptMutex.Lock() defer m.acceptMutex.Unlock() - for cname, c := range m.consumers { - for _, msg := range c.Messages { - m.log.Info(). - Str("Container", cname). - Str("Msg", msg). - Send() + + var loopErr error + var attachError = func(err error) { + if err == nil { + return + } + if loopErr == nil { + loopErr = err + } else { + loopErr = errors.Wrap(loopErr, err.Error()) } } -} - -// FlushLogsToTargets flushes all logs for all consumers (containers) to their targets -func (m *LogWatch) FlushLogsToTargets() error { - m.acceptMutex.Lock() - defer m.acceptMutex.Unlock() - m.log.Info().Msg("Flushing logs to targets") for _, consumer := range m.consumers { // nothing to do if no log targets are configured if len(consumer.logTargets) == 0 { @@ -441,11 +418,23 @@ func (m *LogWatch) FlushLogsToTargets() error { return errors.Errorf("temp file is nil for container %s, this should never happen", consumer.name) } - // do not accept any new logs - consumer.isDone = true - // this was done on purpose, so that when we are done flushing all logs we can close the temp file and handle abrupt termination too - // nolint - defer consumer.tempFile.Close() + preExecuteErr := preExecuteFn(consumer) + if preExecuteErr != nil { + m.log.Error(). + Err(preExecuteErr). + Str("Container", consumer.name). 
+				Msg("Failed to run pre-execute function")
+			attachError(preExecuteErr)
+			break
+		}
+
+		// set the cursor to the end of the file when done, to resume writing
+		//revive:disable
+		defer func() {
+			_, deferErr := consumer.tempFile.Seek(0, 2)
+			attachError(deferErr)
+		}()
+		//revive:enable
 
 		_, err := consumer.tempFile.Seek(0, 0)
 		if err != nil {
 			return err
 		}
@@ -462,39 +451,88 @@ func (m *LogWatch) FlushLogsToTargets() error {
 			decodeErr := decoder.Decode(&log)
 			if decodeErr == nil {
 				counter++
-				for _, logTarget := range consumer.logTargets {
-					if handler, ok := consumer.lw.logTargetHandlers[logTarget]; ok {
-						if err := handler.Handle(consumer, log); err != nil {
-							m.log.Error().
-								Err(err).
-								Str("Container", consumer.name).
-								Str("log target", string(logTarget)).
-								Msg("Failed to handle log target. Aborting")
-							break LOG_LOOP
-						}
-					} else {
-						m.log.Warn().
-							Str("Container", consumer.name).
-							Str("log target", string(logTarget)).
-							Msg("No handler found for log target. Aborting")
-					}
+				executeErr := executeFn(consumer, log)
+				if executeErr != nil {
+					m.log.Error().
+						Err(executeErr).
+						Str("Container", consumer.name).
+						Msg("Failed to run execute function")
+					attachError(executeErr)
+					break LOG_LOOP
 				}
 			} else if errors.Is(decodeErr, io.EOF) {
 				m.log.Info().
 					Int("Log count", counter).
 					Str("Container", consumer.name).
-					Msg("Finished flushing logs")
+					Msg("Finished getting logs")
 				break
 			} else {
 				return decodeErr
 			}
 		}
+
+		c := consumer
+
+		// done on purpose
+		//revive:disable
+		defer func() {
+			attachError(cleanUpFn(c))
+		}()
+		//revive:enable
 	}
 
-	m.log.Info().
-		Msg("Flushed all logs to targets")
+	return loopErr
+}
 
-	return nil
+// FlushLogsToTargets flushes all logs for all consumers (containers) to their targets
+func (m *LogWatch) FlushLogsToTargets() error {
+	var preExecuteFn = func(consumer *ContainerLogConsumer) error {
+		// do not accept any new logs
+		consumer.isDone = true
+
+		return nil
+	}
+	var flushLogsFn = func(consumer *ContainerLogConsumer, log LogContent) error {
+		for _, logTarget := range consumer.logTargets {
+			if handler, ok := consumer.lw.logTargetHandlers[logTarget]; ok {
+				if err := handler.Handle(consumer, log); err != nil {
+					m.log.Error().
+						Err(err).
+						Str("Container", consumer.name).
+						Str("log target", string(logTarget)).
+						Msg("Failed to handle log target. Aborting")
+					return err
+				}
+			} else {
+				m.log.Warn().
+					Str("Container", consumer.name).
+					Str("log target", string(logTarget)).
+					Msg("No handler found for log target. Aborting")
+			}
+		}
+
+		return nil
+	}
+
+	var closeTempFileFn = func(consumer *ContainerLogConsumer) error {
+		if consumer.tempFile == nil {
+			return errors.Errorf("temp file is nil for container %s, this should never happen", consumer.name)
+		}
+
+		return consumer.tempFile.Close()
+	}
+
+	flushErr := m.getAllLogsAndExecute(preExecuteFn, flushLogsFn, closeTempFileFn)
+	if flushErr == nil {
+		m.log.Info().
+			Msg("Finished flushing logs")
+	} else {
+		m.log.Info().
+			Err(flushErr).
+ Msg("Failed to flush logs") + } + + return flushErr } // ContainerLogConsumer is a container log lines consumer @@ -503,7 +541,6 @@ type ContainerLogConsumer struct { prefix string logTargets []LogTarget lw *LogWatch - Messages []string tempFile *os.File encoder *gob.Encoder isDone bool @@ -519,7 +556,6 @@ func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, prefix: prefix, logTargets: logTargets, lw: lw, - Messages: make([]string, 0), isDone: false, hasErrored: false, } @@ -539,6 +575,24 @@ func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, return consumer, nil } +func (g *ContainerLogConsumer) ResetTempFile() error { + if g.tempFile != nil { + if err := g.tempFile.Close(); err != nil { + return err + } + } + + tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-%s-datafile.gob", g.name, uuid.NewString()[0:8])) + if err != nil { + return err + } + + g.tempFile = tempFile + g.encoder = gob.NewEncoder(tempFile) + + return nil +} + // Accept accepts the log message from particular container and saves it to the temp gob file func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { g.lw.acceptMutex.Lock() @@ -557,12 +611,6 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { return } - g.Messages = append(g.Messages, string(l.Content)) - matches := g.FindMatch(l) - for i := 0; i < matches; i++ { - g.lw.notifyTest <- &LogNotification{Container: g.name, Prefix: g.prefix, Log: string(l.Content)} - } - // if no log targets are configured, we don't need to save the logs if len(g.logTargets) == 0 { return @@ -605,26 +653,6 @@ func (g *ContainerLogConsumer) streamLogToTempFile(content LogContent) error { return g.encoder.Encode(content) } -// FindMatch check multiple regex patterns for the same string -// can be checked with one regex, made for readability of user-facing API -func (g *ContainerLogConsumer) FindMatch(l testcontainers.Log) int { - matchesPerPattern := 0 - if g.prefix == "" { - g.prefix = g.name - } - for _, filterRegex := range g.lw.patterns[g.name] { - if filterRegex.Match(l.Content) { - g.lw.log.Info(). - Str("Container", g.name). - Str("Regex", filterRegex.String()). - Str("String", string(l.Content)). 
-					Msg("Match found")
-				matchesPerPattern++
-			}
-		}
-		return matchesPerPattern
-	}
-
 func (g *ContainerLogConsumer) hasLogTarget(logTarget LogTarget) bool {
 	for _, lt := range g.logTargets {
 		if lt == logTarget {
@@ -645,6 +673,8 @@ func getLogTargetsFromEnv() ([]LogTarget, error) {
 			envLogTargets = append(envLogTargets, Loki)
 		case "file":
 			envLogTargets = append(envLogTargets, File)
+		case "in-memory":
+			envLogTargets = append(envLogTargets, InMemory)
 		default:
 			return []LogTarget{}, errors.Errorf("unknown log target: %s", target)
 		}
diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go
index 76143d212..6c16eec19 100644
--- a/logwatch/logwatch_handlers.go
+++ b/logwatch/logwatch_handlers.go
@@ -16,8 +16,9 @@ import (
 type LogTarget string
 
 const (
-	Loki LogTarget = "loki"
-	File LogTarget = "file"
+	Loki     LogTarget = "loki"
+	File     LogTarget = "file"
+	InMemory LogTarget = "in-memory"
 )
 
 type HandleLogTarget interface {
@@ -32,6 +33,7 @@ func getDefaultLogHandlers() map[LogTarget]HandleLogTarget {
 	handlers := make(map[LogTarget]HandleLogTarget)
 	handlers[Loki] = &LokiLogHandler{}
 	handlers[File] = &FileLogHandler{}
+	handlers[InMemory] = &InMemoryLogHandler{}
 
 	return handlers
 }
@@ -165,13 +167,18 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum
 			sb.WriteString(fmt.Sprintf("&var-container_id=%s", c.name))
 		}
 
+		allLogs, err := c.lw.ContainerLogs(c.name)
+		if err != nil {
+			return "", errors.Errorf("failed to get logs for container '%s'", c.name)
+		}
+
 		// let's find the oldest log message to know when to start the range from
-		if len(c.Messages) > 0 {
+		if len(allLogs) > 0 {
 			var firstMsg struct {
 				Ts string `json:"ts"`
 			}
-			if err := json.Unmarshal([]byte(c.Messages[0]), &firstMsg); err != nil {
+			if err := json.Unmarshal([]byte(allLogs[0]), &firstMsg); err != nil {
 				return "", errors.Errorf("failed to unmarshal first log message for container '%s'", c.name)
 			}
 
@@ -203,3 +210,39 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum
 func (h *LokiLogHandler) GetRunId() string {
 	return h.runId
 }
+
+// stores logs in memory
+type InMemoryLogHandler struct {
+	logs  map[string][]LogContent
+	runId string
+}
+
+func (h *InMemoryLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error {
+	if h.logs == nil {
+		h.logs = make(map[string][]LogContent)
+	}
+
+	if _, ok := h.logs[content.ContainerName]; !ok {
+		h.logs[content.ContainerName] = make([]LogContent, 0)
+	}
+	// append unconditionally, so that the first message for a container is not dropped
+	h.logs[content.ContainerName] = append(h.logs[content.ContainerName], content)
+
+	return nil
+}
+
+func (h InMemoryLogHandler) GetLogLocation(_ map[string]*ContainerLogConsumer) (string, error) {
+	return "", nil
+}
+
+func (h InMemoryLogHandler) GetTarget() LogTarget {
+	return InMemory
+}
+
+func (h *InMemoryLogHandler) SetRunId(executionId string) {
+	h.runId = executionId
+}
+
+func (h *InMemoryLogHandler) GetRunId() string {
+	return h.runId
+}
diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go
index 03d5b4063..c6e8f9238 100644
--- a/logwatch/logwatch_test.go
+++ b/logwatch/logwatch_test.go
@@ -3,13 +3,12 @@ package logwatch_test
 
 import (
 	"context"
 	"fmt"
-	"reflect"
 	"regexp"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
-	"github.com/davecgh/go-spew/spew"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
@@ -30,17 +29,6 @@ type TestCase struct {
 	expectedNotifications map[string][]*logwatch.LogNotification
 }
 
-func getNotificationsAmount(m map[string][]*regexp.Regexp) int {
-	if m == nil {
-		return 1
-	}
-	notificationsToAwait 
:= 0 - for _, v := range m { - notificationsToAwait += len(v) - } - return notificationsToAwait -} - // replaceContainerNamePlaceholders this function is used to replace container names with dynamic values // so we can run tests in parallel func replaceContainerNamePlaceholders(tc TestCase) []string { @@ -127,41 +115,26 @@ func TestLogWatchDocker(t *testing.T) { exitEarly: true, }, { - name: "should read exactly 10 streams and notify 4 times (2 containers)", + name: "should read exactly 10 streams (2 containers)", msg: "A\nB\nC\nD", containers: 2, msgsAmount: 1, msgsIntervalSeconds: 0.1, - mustNotifyList: map[string][]*regexp.Regexp{ - "0": { - regexp.MustCompile("A"), - regexp.MustCompile("B"), - }, - "1": { - regexp.MustCompile("C"), - regexp.MustCompile("D"), - }, - }, - expectedNotifications: map[string][]*logwatch.LogNotification{ - "0": { - &logwatch.LogNotification{Container: "0", Log: "A\n"}, - &logwatch.LogNotification{Container: "0", Log: "B\n"}, - }, - "1": { - &logwatch.LogNotification{Container: "1", Log: "C\n"}, - &logwatch.LogNotification{Container: "1", Log: "D\n"}, - }, - }, }, } + var getExpectedMsgCount = func(msg string, msgAmount int) int { + splitted := strings.Split(msg, "\n") + return len(splitted) * msgAmount + } + for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) dynamicContainerNames := replaceContainerNamePlaceholders(tc) - lw, err := logwatch.NewLogWatch(t, tc.mustNotifyList) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err) containers := make([]testcontainers.Container, 0) for _, cn := range dynamicContainerNames { @@ -175,30 +148,13 @@ func TestLogWatchDocker(t *testing.T) { // streams should be there with a gap of 1 second time.Sleep(time.Duration(int(tc.msgsIntervalSeconds*float64(tc.msgsAmount)))*time.Second + 1*time.Second) - lw.PrintAll() // all streams should be recorded for _, cn := range dynamicContainerNames { - require.Len(t, lw.ContainerLogs(cn), tc.msgsAmount*getNotificationsAmount(tc.mustNotifyList)) - } + logs, err := lw.ContainerLogs(cn) + require.NoError(t, err, "should not fail to get logs") - // client must receive notifications if mustNotifyList is set - // each container must have notifications according to their match patterns - if tc.mustNotifyList != nil { - notifications := make(map[string][]*logwatch.LogNotification) - for i := 0; i < getNotificationsAmount(tc.mustNotifyList); i++ { - msg := lw.Listen() - if notifications[msg.Container] == nil { - notifications[msg.Container] = make([]*logwatch.LogNotification, 0) - } - notifications[msg.Container] = append(notifications[msg.Container], msg) - } - t.Logf("notifications: %v", spew.Sdump(notifications)) - t.Logf("expectations: %v", spew.Sdump(tc.expectedNotifications)) - - if !reflect.DeepEqual(tc.expectedNotifications, notifications) { - t.Fatalf("expected logs: %v, got: %v", tc.expectedNotifications, notifications) - } + require.Len(t, logs, getExpectedMsgCount(tc.msg, tc.msgsAmount)) } defer func() { @@ -232,8 +188,7 @@ func TestLogWatchConnectWithDelayDocker(t *testing.T) { interval := float64(1) amount := 10 - //set initial timeout to 0 so that it retries to connect using fibonacci backoff - lw, err := logwatch.NewLogWatch(t, nil) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err) container, err := startTestContainer(ctx, containerName, message, amount, interval, false) require.NoError(t, err) @@ -246,9 
+201,11 @@ func TestLogWatchConnectWithDelayDocker(t *testing.T) { require.NoError(t, err) time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second) - lw.PrintAll() - require.Len(t, lw.ContainerLogs(containerName), amount) + logs, err := lw.ContainerLogs(containerName) + require.NoError(t, err, "should not fail to get logs") + + require.Len(t, logs, amount) t.Cleanup(func() { if err := lw.Shutdown(ctx); err != nil { @@ -359,15 +316,16 @@ func TestLogWatchConnectRetryMockContainer_Once(t *testing.T) { errorChannelError: nil, } - lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second)) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err, "log watch should be created") go func() { // wait for 1 second, so that log watch has time to consume at least one log before it's stopped time.Sleep(1 * time.Second) mockedContainer.startSleep = 1 * time.Second - logsReceived := len(lw.ContainerLogs(mockedContainer.name)) - require.True(t, logsReceived > 0, "should have received at least 1 log before injecting error") + logs, err := lw.ContainerLogs(mockedContainer.name) + require.NoError(t, err, "should not fail to get logs") + require.True(t, len(logs) > 0, "should have received at least 1 log before injecting error") mockedContainer.errorChannelError = errors.New("failed to read logs") // clear the error after 1 second, so that log producer can resume log consumption @@ -389,9 +347,10 @@ func TestLogWatchConnectRetryMockContainer_Once(t *testing.T) { require.NoError(t, err, "log watch should connect to container") time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 3*time.Second) - lw.PrintAll() - require.EqualValues(t, lw.ContainerLogs(mockedContainer.name), logsSent, "log watch should receive all logs") + logs, err := lw.ContainerLogs(mockedContainer.name) + require.NoError(t, err, "should not fail to get logs") + require.EqualValues(t, logs, logsSent, "log watch should receive all logs") require.Equal(t, 2, mockedContainer.startCounter, "log producer should be started twice") t.Cleanup(func() { @@ -419,14 +378,16 @@ func TestLogWatchConnectRetryMockContainer_Twice(t *testing.T) { errorChannelError: nil, } - lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second)) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err, "log watch should be created") go func() { // wait for 1 second, so that log watch has time to consume at least one log before it's stopped time.Sleep(1 * time.Second) mockedContainer.startSleep = 1 * time.Second - require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0") + logs, err := lw.ContainerLogs(mockedContainer.name) + require.NoError(t, err, "should not fail to get logs") + require.True(t, len(logs) > 0, "should have received at least 1 log before injecting error, but got 0") mockedContainer.errorChannelError = errors.New("failed to read logs") // clear the error after 1 second, so that log producer can resume log consumption @@ -436,7 +397,9 @@ func TestLogWatchConnectRetryMockContainer_Twice(t *testing.T) { // wait for 3 seconds so that some logs are consumed before we inject error again time.Sleep(3 * time.Second) mockedContainer.startSleep = 1 * time.Second - require.True(t, 
len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0")
+		logs, err = lw.ContainerLogs(mockedContainer.name)
+		require.NoError(t, err, "should not fail to get logs")
+		require.True(t, len(logs) > 0, "should have received at least 1 log before injecting error, but got 0")
 
 		mockedContainer.errorChannelError = errors.New("failed to read logs")
 		// clear the error after 1 second, so that log producer can resume log consumption
@@ -458,9 +421,11 @@ func TestLogWatchConnectRetryMockContainer_Twice(t *testing.T) {
 	require.NoError(t, err, "log watch should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
-	lw.PrintAll()
+
+	logs, err := lw.ContainerLogs(mockedContainer.name)
+	require.NoError(t, err, "should not fail to get logs")
 
-	require.EqualValues(t, lw.ContainerLogs(mockedContainer.name), logsSent, "log watch should receive all logs")
+	require.EqualValues(t, logs, logsSent, "log watch should receive all logs")
 	require.Equal(t, 3, mockedContainer.startCounter, "log producer should be started three times")
 
 	t.Cleanup(func() {
@@ -488,14 +453,16 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) {
 		errorChannelError: nil,
 	}
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second))
+	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogTarget(logwatch.InMemory))
 	require.NoError(t, err, "log watch should be created")
 
 	go func() {
 		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
 		time.Sleep(1 * time.Second)
 		mockedContainer.startSleep = 1 * time.Second
-		require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0")
+		logs, err := lw.ContainerLogs(mockedContainer.name)
+		require.NoError(t, err, "should not fail to get logs")
+		require.True(t, len(logs) > 0, "should have received at least 1 log before injecting error, but got 0")
 
 		// introduce read error, so that log producer stops
 		mockedContainer.errorChannelError = errors.New("failed to read logs")
@@ -522,9 +489,11 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) {
 	require.NoError(t, err, "log watch should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
-	lw.PrintAll()
+
+	logs, err := lw.ContainerLogs(mockedContainer.name)
+	require.NoError(t, err, "should not fail to get logs")
 
-	require.EqualValues(t, logsSent, lw.ContainerLogs(mockedContainer.name), "log watch should receive all logs")
+	require.EqualValues(t, logsSent, logs, "log watch should receive all logs")
 	require.Equal(t, 3, mockedContainer.startCounter, "log producer should be started three times")
 
 	t.Cleanup(func() {
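With pattern matching and the in-memory Messages slice removed, these tests read logs back through the new in-memory target; a short sketch of the read path they now share (only names introduced in this patch):

	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
	require.NoError(t, err, "log watch should be created")
	require.NoError(t, lw.ConnectContainer(ctx, container, ""), "log watch should connect to container")

	// ContainerLogs now replays the consumer's temp gob file, so it returns an error as well
	logs, err := lw.ContainerLogs("container-name")
	require.NoError(t, err, "should not fail to get logs")
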
log watch should have no logs (we could improve that in the future) func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) uuid := uuid.NewString() @@ -552,14 +521,16 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) { errorChannelError: nil, } - lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerTimeoutRetryLimit(7)) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerTimeoutRetryLimit(7), logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err, "log watch should be created") go func() { // wait for 6 seconds, so that log watch has time to consume at least one log before it's stopped time.Sleep(6 * time.Second) mockedContainer.startSleep = 1 * time.Second - require.True(t, len(lw.ContainerLogs(mockedContainer.name)) > 0, "should have received at least 1 log before injecting error, but got 0") + logs, err := lw.ContainerLogs(mockedContainer.name) + require.NoError(t, err, "should not fail to get logs") + require.True(t, len(logs) > 0, "should have received at least 1 log before injecting error, but got 0") // introduce read error, so that log producer stops mockedContainer.errorChannelError = errors.New("failed to read logs") @@ -581,10 +552,11 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) { require.NoError(t, err, "log watch should connect to container") time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second) - lw.PrintAll() // it should still salvage 6 logs that were consumed before error was injected and restarting failed - require.EqualValues(t, logsSent[:6], lw.ContainerLogs(mockedContainer.name), "log watch should receive six logs") + logs, err := lw.ContainerLogs(mockedContainer.name) + require.NoError(t, err, "should not fail to get logs") + require.Equal(t, 0, len(logs), "log watch should have no logs") require.Equal(t, 7, mockedContainer.startCounter, "log producer should be started seven times") t.Cleanup(func() { diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go index 8a1a6717f..f1c9856bc 100644 --- a/logwatch/logwatch_user_test.go +++ b/logwatch/logwatch_user_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "os" - "regexp" "testing" "time" @@ -65,56 +64,6 @@ func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch) error { /* That's how you use it */ -func TestExampleUserInteraction(t *testing.T) { - t.Run("sync API, block, receive one message", func(t *testing.T) { - ctx := context.Background() - testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} - d, err := NewDeployment(ctx, testData) - // nolint - defer d.Shutdown(ctx) - require.NoError(t, err) - lw, err := logwatch.NewLogWatch( - t, - map[string][]*regexp.Regexp{ - "container-0": { - regexp.MustCompile("A"), - }, - }, - ) - require.NoError(t, err) - err = d.ConnectLogs(lw) - require.NoError(t, err) - match := lw.Listen() - require.NotEmpty(t, match) - }) - t.Run("async API, execute some logic on match", func(t *testing.T) { - ctx := context.Background() - testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD\n", "E\nF\nG\nH\n"}} - notifications := 0 - d, err := NewDeployment(ctx, testData) - // nolint - defer d.Shutdown(ctx) - require.NoError(t, err) - lw, err := logwatch.NewLogWatch( - t, - map[string][]*regexp.Regexp{ - "container-0": { - regexp.MustCompile("A"), - }, - "container-1": { - 
regexp.MustCompile("E"), - }, - }, - ) - require.NoError(t, err) - lw.OnMatch(func(ln *logwatch.LogNotification) { notifications++ }) - err = d.ConnectLogs(lw) - require.NoError(t, err) - time.Sleep(1 * time.Second) - require.Equal(t, testData.repeat*len(testData.streams), notifications) - }) -} - var ( A = []byte("A\n") B = []byte("B\n") From 4f6e529c1173e0311dedc920eae4b42b23372140 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 16:41:50 -0300 Subject: [PATCH 19/40] move log producer loop from log watch to consumer --- logwatch/logwatch.go | 116 +++++++++++++++++--------- logwatch/logwatch_test.go | 170 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 238 insertions(+), 48 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 107b78358..bb87e2e4c 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -41,6 +41,7 @@ type LogProducingContainer interface { GetLogProducerErrorChannel() <-chan error IsRunning() bool GetContainerID() string + Terminate(context.Context) error } // LogWatch is a test helper struct to monitor docker container logs for some patterns @@ -53,7 +54,6 @@ type LogWatch struct { consumers map[string]*ContainerLogConsumer logTargetHandlers map[LogTarget]HandleLogTarget enabledLogTargets []LogTarget - logListeningDone chan struct{} logProducerTimeout time.Duration logProducerTimeoutRetryLimit int // -1 for infinite retries acceptMutex sync.Mutex @@ -94,7 +94,6 @@ func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ... log: l, consumers: make(map[string]*ContainerLogConsumer, 0), logTargetHandlers: getDefaultLogHandlers(), - logListeningDone: make(chan struct{}, 1), logProducerTimeout: time.Duration(10 * time.Second), logProducerTimeoutRetryLimit: 10, enabledLogTargets: envLogTargets, @@ -189,18 +188,20 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC name = strings.Replace(name, "/", "", 1) prefix = strings.Replace(prefix, "/", "", 1) + if prefix == "" { + prefix = name + } + + if _, ok := m.consumers[name]; ok { + return errors.Errorf("container %s is already connected", name) + } + enabledLogTargets := make([]LogTarget, 0) for logTarget := range m.logTargetHandlers { enabledLogTargets = append(enabledLogTargets, logTarget) } - var cons *ContainerLogConsumer - if prefix != "" { - cons, err = newContainerLogConsumer(m, name, prefix, enabledLogTargets...) - } else { - cons, err = newContainerLogConsumer(m, name, name, enabledLogTargets...) - } - + cons, err := newContainerLogConsumer(ctx, m, container, prefix, enabledLogTargets...) if err != nil { return err } @@ -259,6 +260,8 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC Str("Container name", name). Msg("Failed to reset temp file. Stopping logging") + cons.MarkAsErrored() + return } for container.StartLogProducer(ctx, timeout) != nil { @@ -276,6 +279,9 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC Err(err). Str("Container name", name). Msg("Previously running log producer couldn't be stopped. Used all retry attempts. Won't try again") + + cons.MarkAsErrored() + return } m.log.Info(). @@ -286,6 +292,9 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC Err(err). Str("Container name", name). Msg("Used all attempts to listen to container logs. 
Won't try again") + + cons.MarkAsErrored() + return } @@ -295,28 +304,30 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC return } } - }(m.logListeningDone, m.logProducerTimeout, m.logProducerTimeoutRetryLimit) + }(cons.logListeningDone, m.logProducerTimeout, m.logProducerTimeoutRetryLimit) return err } +func (m *LogWatch) GetConsumers() map[string]*ContainerLogConsumer { + return m.consumers +} + // Shutdown disconnects all containers, stops notifications func (m *LogWatch) Shutdown(context context.Context) error { - defer close(m.logListeningDone) var err error - for _, c := range m.containers { - singleErr := m.DisconnectContainer(c) - if singleErr != nil { - name, _ := c.Name(context) + for _, c := range m.consumers { + discErr := m.DisconnectContainer(c) + if discErr != nil { m.log.Error(). Err(err). - Str("Name", name). + Str("Name", c.name). Msg("Failed to disconnect container") if err == nil { - err = singleErr + err = discErr } else { - err = errors.Wrap(err, singleErr.Error()) + err = errors.Wrap(err, discErr.Error()) } } } @@ -325,8 +336,6 @@ func (m *LogWatch) Shutdown(context context.Context) error { m.loki.Stop() } - m.logListeningDone <- struct{}{} - return err } @@ -361,10 +370,18 @@ func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { } // DisconnectContainer disconnects the particular container -func (m *LogWatch) DisconnectContainer(container LogProducingContainer) error { - if container.IsRunning() { - m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container") - return container.StopLogProducer() +func (m *LogWatch) DisconnectContainer(consumer *ContainerLogConsumer) error { + if consumer.isDone { + return nil + } + + consumer.isDone = true + consumer.logListeningDone <- struct{}{} + defer close(consumer.logListeningDone) + + if consumer.container.IsRunning() { + m.log.Info().Str("container", consumer.container.GetContainerID()).Msg("Disconnecting container") + return consumer.container.StopLogProducer() } return nil @@ -537,27 +554,38 @@ func (m *LogWatch) FlushLogsToTargets() error { // ContainerLogConsumer is a container log lines consumer type ContainerLogConsumer struct { - name string - prefix string - logTargets []LogTarget - lw *LogWatch - tempFile *os.File - encoder *gob.Encoder - isDone bool - hasErrored bool + name string + prefix string + logTargets []LogTarget + lw *LogWatch + tempFile *os.File + encoder *gob.Encoder + isDone bool + hasErrored bool + logListeningDone chan struct{} + container LogProducingContainer } // newContainerLogConsumer creates new log consumer for a container that // - signal if log line matches the pattern // - push all lines to configured log targets -func newContainerLogConsumer(lw *LogWatch, containerName string, prefix string, logTargets ...LogTarget) (*ContainerLogConsumer, error) { +func newContainerLogConsumer(ctx context.Context, lw *LogWatch, container LogProducingContainer, prefix string, logTargets ...LogTarget) (*ContainerLogConsumer, error) { + containerName, err := container.Name(ctx) + if err != nil { + return nil, err + } + + containerName = strings.Replace(containerName, "/", "", 1) + consumer := &ContainerLogConsumer{ - name: containerName, - prefix: prefix, - logTargets: logTargets, - lw: lw, - isDone: false, - hasErrored: false, + name: containerName, + prefix: prefix, + logTargets: logTargets, + lw: lw, + isDone: false, + hasErrored: false, + logListeningDone: make(chan struct{}, 1), + container: container, } if len(logTargets) == 0 { @@ 
-593,6 +621,16 @@ func (g *ContainerLogConsumer) ResetTempFile() error { return nil } +func (g *ContainerLogConsumer) MarkAsErrored() { + g.hasErrored = true + g.isDone = true + close(g.logListeningDone) +} + +func (g *ContainerLogConsumer) GetContainer() LogProducingContainer { + return g.container +} + // Accept accepts the log message from particular container and saves it to the temp gob file func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { g.lw.acceptMutex.Lock() diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index c6e8f9238..22f1b0864 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -136,7 +136,7 @@ func TestLogWatchDocker(t *testing.T) { dynamicContainerNames := replaceContainerNamePlaceholders(tc) lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err) - containers := make([]testcontainers.Container, 0) + for _, cn := range dynamicContainerNames { container, err := startTestContainer(ctx, cn, tc.msg, tc.msgsAmount, tc.msgsIntervalSeconds, tc.exitEarly) require.NoError(t, err) @@ -165,12 +165,13 @@ func TestLogWatchDocker(t *testing.T) { // https://github.com/testcontainers/testcontainers-go/pull/1085 // tried latest branch with a fix, but no luck // this code terminates the containers properly - for _, c := range containers { + for _, c := range lw.GetConsumers() { if !tc.exitEarly { if err := lw.DisconnectContainer(c); err != nil { t.Fatalf("failed to disconnect container: %s", err.Error()) } - if err := c.Terminate(ctx); err != nil { + container := c.GetContainer() + if err := container.Terminate(ctx); err != nil { t.Fatalf("failed to terminate container: %s", err.Error()) } } @@ -217,6 +218,65 @@ func TestLogWatchConnectWithDelayDocker(t *testing.T) { }) } +func TestLogWatchTwoDockerContainers(t *testing.T) { + t.Parallel() + ctx := testcontext.Get(t) + containerOneName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString()) + containerTwoName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString()) + message := "message" + interval := float64(1) + amountFirst := 10 + amountSecond := 20 + + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) + require.NoError(t, err, "log watch should be created") + containerOne, err := startTestContainer(ctx, containerOneName, message, amountFirst, interval, false) + require.NoError(t, err, "should not fail to start container") + + containerTwo, err := startTestContainer(ctx, containerTwoName, message, amountSecond, interval, false) + require.NoError(t, err, "should not fail to start container") + + err = lw.ConnectContainer(context.Background(), containerOne, containerOneName) + require.NoError(t, err, "log watch should connect to container") + + err = lw.ConnectContainer(context.Background(), containerTwo, containerTwoName) + require.NoError(t, err, "log watch should connect to container") + + time.Sleep(time.Duration(int(interval*float64(amountFirst)))*time.Second + 5*time.Second) + + for _, c := range lw.GetConsumers() { + name, err := c.GetContainer().Name(ctx) + require.NoError(t, err, "should not fail to get container name") + if name == containerOneName { + err = lw.DisconnectContainer(c) + require.NoError(t, err, "log watch should disconnect from container") + } + } + + time.Sleep(time.Duration(int(interval*float64(amountSecond-amountFirst)))*time.Second + 5*time.Second) + + logs, err := lw.ContainerLogs(containerOneName) + require.NoError(t, err, "should not fail to get logs") + + 
require.Len(t, logs, amountFirst, "wrong number of logs received from first container") + + logs, err = lw.ContainerLogs(containerTwoName) + require.NoError(t, err, "should not fail to get logs") + require.Len(t, logs, amountSecond, "wrong number of logs received from second container") + + t.Cleanup(func() { + if err := lw.Shutdown(ctx); err != nil { + t.Fatalf("failed to shutdown logwatch: %s", err.Error()) + } + if err := containerOne.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate first container: %s", err.Error()) + } + if err := containerTwo.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate second container: %s", err.Error()) + } + }) +} + type MockedLogProducingContainer struct { name string id string @@ -290,6 +350,10 @@ func (m *MockedLogProducingContainer) IsRunning() bool { return m.isRunning } +func (m *MockedLogProducingContainer) Terminate(context.Context) error { + return nil +} + func (m *MockedLogProducingContainer) GetContainerID() string { return m.id } @@ -300,7 +364,7 @@ func (m *MockedLogProducingContainer) SendLog(msg string) { // scenario: log watch consumes a log, then the container returns an error, log watch reconnects // and consumes logs again. log watch should not miss any logs nor consume any log twice -func TestLogWatchConnectRetryMockContainer_Once(t *testing.T) { +func TestLogWatchConnectRetryMockContainer_FailsOnce(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) uuid := uuid.NewString() @@ -362,7 +426,7 @@ func TestLogWatchConnectRetryMockContainer_Once(t *testing.T) { // scenario: log watch consumes a log, then the container returns an error, log watch reconnects // and consumes logs again, then it happens again. log watch should not miss any logs nor consume any log twice -func TestLogWatchConnectRetryMockContainer_Twice(t *testing.T) { +func TestLogWatchConnectRetryMockContainer_FailsTwice(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) uuid := uuid.NewString() @@ -437,7 +501,7 @@ func TestLogWatchConnectRetryMockContainer_Twice(t *testing.T) { // scenario: it consumes a log, then the container returns an error, but when log watch tries to reconnect log producer // is still running, but finally it stops and log watch reconnects. log watch should not miss any logs nor consume any log twice -func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) { +func TestLogWatchConnectRetryMockContainer_FailsFirstRestart(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) uuid := uuid.NewString() @@ -505,7 +569,7 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedFirstTime(t *testing.T) { // scenario: it consumes a log, then the container returns an error, but when log watch tries to reconnect log producer // is still running and log watch never reconnects. 
log watch should have no logs (we could improve that in the future) -func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) { +func TestLogWatchConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) uuid := uuid.NewString() @@ -538,11 +602,9 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) { mockedContainer.startError = errors.New("still running") }() - logsSent := []string{} go func() { for i := 0; i < amount; i++ { toSend := fmt.Sprintf("message-%d", i) - logsSent = append(logsSent, toSend) mockedContainer.SendLog(toSend) time.Sleep(time.Duration(time.Duration(interval) * time.Second)) } @@ -565,3 +627,93 @@ func TestLogWatchConnectRetryMockContainer_NotStoppedEver(t *testing.T) { } }) } + +// scenario: log listening loops are independent for all containers/consumers and even if one of them stops +// due to errors, the second one continues and receives all logs +func TestLogWatchConnectRetryTwoMockContainers_FirstAlwaysFailsRestart_SecondWorks(t *testing.T) { + t.Parallel() + ctx := testcontext.Get(t) + uuid_1 := uuid.NewString() + uuid_2 := uuid.NewString() + amountFirst := 10 + amountSecond := 20 + interval := float64(1) + + mockedContainer_1 := &MockedLogProducingContainer{ + name: fmt.Sprintf("%s-container-%s", t.Name(), uuid_1), + id: uuid_1, + isRunning: true, + startError: nil, + stopError: nil, + errorChannelError: nil, + } + + mockedContainer_2 := &MockedLogProducingContainer{ + name: fmt.Sprintf("%s-container-%s", t.Name(), uuid_2), + id: uuid_2, + isRunning: true, + startError: nil, + stopError: nil, + errorChannelError: nil, + } + + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerTimeoutRetryLimit(7), logwatch.WithLogTarget(logwatch.InMemory)) + require.NoError(t, err, "log watch should be created") + + go func() { + // wait for 6 seconds, so that log watch has time to consume at least one log before it's stopped + time.Sleep(6 * time.Second) + mockedContainer_1.startSleep = 1 * time.Second + logs, err := lw.ContainerLogs(mockedContainer_1.name) + require.NoError(t, err, "should not fail to get logs") + require.True(t, len(logs) > 0, "should have received at least 1 log before injecting error, but got 0") + + // introduce read error, so that log producer stops + mockedContainer_1.errorChannelError = errors.New("failed to read logs") + // inject start error, that simulates log producer still running (e.g. 
closing connection to the container) + mockedContainer_1.startError = errors.New("still running") + }() + + go func() { + for i := 0; i < amountFirst; i++ { + toSend := fmt.Sprintf("message-%d", i) + mockedContainer_1.SendLog(toSend) + time.Sleep(time.Duration(time.Duration(interval) * time.Second)) + } + }() + + logsSent := []string{} + go func() { + for i := 0; i < amountSecond; i++ { + toSend := fmt.Sprintf("message-%d", i) + logsSent = append(logsSent, toSend) + mockedContainer_2.SendLog(toSend) + time.Sleep(time.Duration(time.Duration(interval) * time.Second)) + } + }() + + err = lw.ConnectContainer(context.Background(), mockedContainer_1, mockedContainer_1.name) + require.NoError(t, err, "log watch should connect to container") + + err = lw.ConnectContainer(context.Background(), mockedContainer_2, mockedContainer_2.name) + require.NoError(t, err, "log watch should connect to container") + + time.Sleep(time.Duration(int(interval*float64(amountSecond)))*time.Second + 5*time.Second) + + logs_1, err := lw.ContainerLogs(mockedContainer_1.name) + require.NoError(t, err, "should not fail to get logs") + require.Equal(t, 0, len(logs_1), "log watch should have no logs") + require.Equal(t, 7, mockedContainer_1.startCounter, "log producer should be started seven times for first container") + + logs_2, err := lw.ContainerLogs(mockedContainer_2.name) + require.NoError(t, err, "should not fail to get logs") + require.Equal(t, amountSecond, len(logs_2), "log watch should have all logs for second container") + require.EqualValues(t, logsSent, logs_2, "log watch had different logs for second container than expected") + require.Equal(t, 1, mockedContainer_2.startCounter, "log producer should be started one time for second container") + + t.Cleanup(func() { + if err := lw.Shutdown(ctx); err != nil { + t.Fatalf("failed to shutdown logwatch: %s", err.Error()) + } + }) +} From 9c9011949bcdc8533c89f7d3bc4e24b83ee12687 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 17:38:37 -0300 Subject: [PATCH 20/40] use avast/retry in one place --- logwatch/logwatch.go | 49 +++++++++++++++++++------------------- logwatch/logwatch_test.go | 8 +++--- utils/retries/retry_functions.go | 8 ------ 3 files changed, 34 insertions(+), 31 deletions(-) delete mode 100644 utils/retries/retry_functions.go diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index bb87e2e4c..4904a091e 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/avast/retry-go" "github.com/google/uuid" "github.com/pkg/errors" "github.com/rs/zerolog" @@ -20,7 +21,6 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/testsummary" - "github.com/smartcontractkit/chainlink-testing-framework/utils/retries" "github.com/smartcontractkit/chainlink-testing-framework/utils/runid" ) @@ -173,12 +173,19 @@ func WithLogProducerTimeout(timeout time.Duration) Option { } } -func WithLogProducerTimeoutRetryLimit(retryLimit int) Option { +func WithLogProducerRetryLimit(retryLimit int) Option { return func(lw *LogWatch) { lw.logProducerTimeoutRetryLimit = retryLimit } } +// fibonacci returns the n-th Fibonacci number; it is used below to grow the backoff between restart attempts +func fibonacci(n int) int { + if n <= 1 { + return n + } + return fibonacci(n-1) + fibonacci(n-2) +} + // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingContainer, prefix string) error { name, err := container.Name(ctx) @@ 
-242,7 +249,7 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC Str("Container name", name). Msg("Log producer errored") if shouldRetry() { - backoff := retries.Fibonacci(currentAttempt) + backoff := fibonacci(currentAttempt) timeout = timeout + time.Duration(backoff)*time.Second m.log.Info(). Str("Prefix", prefix). @@ -250,11 +257,10 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC Str("Timeout", timeout.String()). Msgf("Retrying connection and listening to container logs. Attempt %d/%d", currentAttempt, retryLimit) - failedToStart := false //TODO if there are many failures here we could save the file and restore its content if we fail to - //create a new temp file - resetErr := cons.ResetTempFile() - if resetErr != nil { + //create a new temp file; we remove the previous one to avoid log duplication, because new log producer + //fetches logs from the beginning + if resetErr := cons.ResetTempFile(); resetErr != nil { m.log.Error(). Err(resetErr). Str("Container name", name). @@ -264,17 +270,21 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC return } - for container.StartLogProducer(ctx, timeout) != nil { - if !shouldRetry() { - failedToStart = true - break - } - m.log.Info(). - Str("Container name", name). - Msg("Waiting for log producer to stop before restarting it") - time.Sleep(1 * time.Second) - } - if failedToStart { + + startErr := retry.Do(func() error { + return container.StartLogProducer(ctx, timeout) + }, + retry.Attempts(uint(retryLimit)), + retry.Delay(1*time.Second), + retry.OnRetry(func(n uint, err error) { + m.log.Info(). + Str("Container name", name). + Str("Attempt", fmt.Sprintf("%d/%d", n+1, retryLimit)). + Msg("Waiting for log producer to stop before restarting it") + }), + ) + + if startErr != nil { m.log.Error(). Err(startErr). Str("Container name", name). @@ -284,6 +294,7 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC return } + m.log.Info(). Str("Container name", name). 
Msg("Started new log producer") @@ -298,7 +309,7 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC return } - time.Sleep(500 * time.Millisecond) + // time.Sleep(500 * time.Millisecond) } case <-done: return diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index 22f1b0864..dc66f54ab 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -585,7 +585,7 @@ func TestLogWatchConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) { errorChannelError: nil, } - lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerTimeoutRetryLimit(7), logwatch.WithLogTarget(logwatch.InMemory)) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerRetryLimit(4), logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err, "log watch should be created") go func() { @@ -619,7 +619,7 @@ func TestLogWatchConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) { logs, err := lw.ContainerLogs(mockedContainer.name) require.NoError(t, err, "should not fail to get logs") require.Equal(t, 0, len(logs), "log watch should have no logs") - require.Equal(t, 7, mockedContainer.startCounter, "log producer should be started seven times") + require.Equal(t, 5, mockedContainer.startCounter, "log producer should be started seven times") t.Cleanup(func() { if err := lw.Shutdown(ctx); err != nil { @@ -657,7 +657,7 @@ func TestLogWatchConnectRetryTwoMockContainers_FirstAlwaysFailsRestart_SecondWor errorChannelError: nil, } - lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerTimeoutRetryLimit(7), logwatch.WithLogTarget(logwatch.InMemory)) + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerRetryLimit(4), logwatch.WithLogTarget(logwatch.InMemory)) require.NoError(t, err, "log watch should be created") go func() { @@ -703,7 +703,7 @@ func TestLogWatchConnectRetryTwoMockContainers_FirstAlwaysFailsRestart_SecondWor logs_1, err := lw.ContainerLogs(mockedContainer_1.name) require.NoError(t, err, "should not fail to get logs") require.Equal(t, 0, len(logs_1), "log watch should have no logs") - require.Equal(t, 7, mockedContainer_1.startCounter, "log producer should be started seven times for first container") + require.Equal(t, 5, mockedContainer_1.startCounter, "log producer should be started seven times for first container") logs_2, err := lw.ContainerLogs(mockedContainer_2.name) require.NoError(t, err, "should not fail to get logs") diff --git a/utils/retries/retry_functions.go b/utils/retries/retry_functions.go deleted file mode 100644 index 1754207cf..000000000 --- a/utils/retries/retry_functions.go +++ /dev/null @@ -1,8 +0,0 @@ -package retries - -func Fibonacci(n int) int { - if n <= 1 { - return n - } - return Fibonacci(n-1) + Fibonacci(n-2) -} From 0bc91a141829925360ab7e3fa239639c5d2a4276 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 17:56:37 -0300 Subject: [PATCH 21/40] fix go.mod --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 831274507..e10928d1c 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.21 require ( github.com/avast/retry-go v3.0.0+incompatible github.com/chaos-mesh/chaos-mesh/api/v1alpha1 v0.0.0-20220226050744-799408773657 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/go-connections v0.4.0 
github.com/ethereum/go-ethereum v1.12.0 github.com/go-resty/resty/v2 v2.7.0 @@ -34,6 +33,7 @@ require ( github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect github.com/cespare/cp v1.1.1 // indirect github.com/containerd/log v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect github.com/hashicorp/go-version v1.6.0 // indirect From aacd0b15b3edc799e4d41d0067944bf1c9f15c49 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 18:28:17 -0300 Subject: [PATCH 22/40] restore old disconnect interface --- logwatch/logwatch.go | 26 +++++++++++++++----------- logwatch/logwatch_test.go | 6 ++++-- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 4904a091e..5c124e76b 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -328,7 +328,9 @@ func (m *LogWatch) GetConsumers() map[string]*ContainerLogConsumer { func (m *LogWatch) Shutdown(context context.Context) error { var err error for _, c := range m.consumers { - discErr := m.DisconnectContainer(c) + c.Stop() + + discErr := m.DisconnectContainer(c.container) if discErr != nil { m.log.Error(). Err(err). @@ -380,19 +382,21 @@ func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { } } -// DisconnectContainer disconnects the particular container -func (m *LogWatch) DisconnectContainer(consumer *ContainerLogConsumer) error { - if consumer.isDone { - return nil +func (c *ContainerLogConsumer) Stop() { + if c.isDone { + return } - consumer.isDone = true - consumer.logListeningDone <- struct{}{} - defer close(consumer.logListeningDone) + c.isDone = true + c.logListeningDone <- struct{}{} + defer close(c.logListeningDone) +} - if consumer.container.IsRunning() { - m.log.Info().Str("container", consumer.container.GetContainerID()).Msg("Disconnecting container") - return consumer.container.StopLogProducer() +// DisconnectContainer disconnects the particular container +func (m *LogWatch) DisconnectContainer(container LogProducingContainer) error { + if container.IsRunning() { + m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container") + return container.StopLogProducer() } return nil diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index dc66f54ab..48100dbd7 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -167,7 +167,8 @@ func TestLogWatchDocker(t *testing.T) { // this code terminates the containers properly for _, c := range lw.GetConsumers() { if !tc.exitEarly { - if err := lw.DisconnectContainer(c); err != nil { + c.Stop() + if err := lw.DisconnectContainer(c.GetContainer()); err != nil { t.Fatalf("failed to disconnect container: %s", err.Error()) } container := c.GetContainer() @@ -248,7 +249,8 @@ func TestLogWatchTwoDockerContainers(t *testing.T) { name, err := c.GetContainer().Name(ctx) require.NoError(t, err, "should not fail to get container name") if name == containerOneName { - err = lw.DisconnectContainer(c) + c.Stop() + err = lw.DisconnectContainer(containerOne) require.NoError(t, err, "log watch should disconnect from container") } } From 21bedef04d48dcab1ddf6d5f23f78668f436ebd5 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 19:36:04 -0300 Subject: [PATCH 23/40] fix lints --- logwatch/logwatch.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 
deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 5c124e76b..eea0cbf72 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -382,14 +382,14 @@ func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { } } -func (c *ContainerLogConsumer) Stop() { - if c.isDone { +func (g *ContainerLogConsumer) Stop() { + if g.isDone { return } - c.isDone = true - c.logListeningDone <- struct{}{} - defer close(c.logListeningDone) + g.isDone = true + g.logListeningDone <- struct{}{} + defer close(g.logListeningDone) } // DisconnectContainer disconnects the particular container From 395f48d7aecb4a738069e0d795f4a3ab4e9ff24b Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 4 Dec 2023 19:52:01 -0300 Subject: [PATCH 24/40] do not iterate over all logs to get first timestamp, save it when accepting logs --- logwatch/logwatch.go | 30 ++++++++++++++++++++++++++++++ logwatch/logwatch_handlers.go | 26 ++------------------------ 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index eea0cbf72..83eb00b76 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -3,6 +3,7 @@ package logwatch import ( "context" "encoding/gob" + "encoding/json" "fmt" "io" "os" @@ -579,6 +580,7 @@ type ContainerLogConsumer struct { hasErrored bool logListeningDone chan struct{} container LogProducingContainer + firstLogTs time.Time } // newContainerLogConsumer creates new log consumer for a container that @@ -601,6 +603,7 @@ func newContainerLogConsumer(ctx context.Context, lw *LogWatch, container LogPro hasErrored: false, logListeningDone: make(chan struct{}, 1), container: container, + firstLogTs: time.Now(), } if len(logTargets) == 0 { @@ -618,6 +621,10 @@ func newContainerLogConsumer(ctx context.Context, lw *LogWatch, container LogPro return consumer, nil } +func (g *ContainerLogConsumer) GetStartTime() time.Time { + return g.firstLogTs +} + func (g *ContainerLogConsumer) ResetTempFile() error { if g.tempFile != nil { if err := g.tempFile.Close(); err != nil { @@ -672,10 +679,33 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { if g.tempFile == nil || g.encoder == nil { g.hasErrored = true g.lw.log.Error(). + Str("Container", g.name). Msg("temp file or encoder is nil, consumer cannot work, this should never happen") return } + var logMsg struct { + Ts string `json:"ts"` + } + + if err := json.Unmarshal([]byte(l.Content), &logMsg); err != nil { + g.lw.log.Error(). + Str("Container", g.name). + Msg("failed to unmarshal log message for container") + } + + maybeFirstTs, err := time.Parse(time.RFC3339, logMsg.Ts) + if err != nil { + g.lw.log.Error(). + Str("Container", g.name). + Str("Timestamp", logMsg.Ts). 
+ Msg("failed to parse first log message's timestamp ") + } + + if maybeFirstTs.Before(g.firstLogTs) { + g.firstLogTs = maybeFirstTs + } + content := LogContent{ TestName: g.lw.testName, ContainerName: g.name, diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go index 6c16eec19..d672d67c4 100644 --- a/logwatch/logwatch_handlers.go +++ b/logwatch/logwatch_handlers.go @@ -1,7 +1,6 @@ package logwatch import ( - "encoding/json" "fmt" "os" "path/filepath" @@ -167,29 +166,8 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum sb.WriteString(fmt.Sprintf("&var-container_id=%s", c.name)) } - allLogs, err := c.lw.ContainerLogs(c.name) - if err != nil { - return "", errors.Errorf("failed to get logs for container '%s'", c.name) - } - - // lets find the oldest log message to know when to start the range from - if len(allLogs) > 0 { - var firstMsg struct { - Ts string `json:"ts"` - } - - if err := json.Unmarshal([]byte(allLogs[0]), &firstMsg); err != nil { - return "", errors.Errorf("failed to unmarshal first log message for container '%s'", c.name) - } - - firstTs, err := time.Parse(time.RFC3339, firstMsg.Ts) - if err != nil { - return "", errors.Errorf("failed to parse first log message's timestamp '%+v' for container '%s'", firstTs, c.name) - } - - if firstTs.Before(rangeFrom) { - rangeFrom = firstTs - } + if c.GetStartTime().Before(rangeFrom) { + rangeFrom = c.GetStartTime() } } From 76ec358cc94f0c3adbf684b811f962f49cdbcea2 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 5 Dec 2023 06:46:12 -0300 Subject: [PATCH 25/40] fix log parsing to get timestamp --- logwatch/logwatch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 83eb00b76..1d33db568 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -688,7 +688,7 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { Ts string `json:"ts"` } - if err := json.Unmarshal([]byte(l.Content), &logMsg); err != nil { + if err := json.Unmarshal(l.Content, &logMsg); err != nil { g.lw.log.Error(). Str("Container", g.name). Msg("failed to unmarshal log message for container") From c93c7928c8ea2fb4113e9822b2d368e0be36e60a Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 5 Dec 2023 08:20:44 -0300 Subject: [PATCH 26/40] do not close temp file when flushing logs, do it on shutdown --- logwatch/logwatch.go | 123 +++++++++++------------ logwatch/logwatch_test.go | 175 ++++++++++++++++++++++++++++++++- logwatch/logwatch_user_test.go | 9 ++ 3 files changed, 242 insertions(+), 65 deletions(-) diff --git a/logwatch/logwatch.go b/logwatch/logwatch.go index 1d33db568..012c87c28 100644 --- a/logwatch/logwatch.go +++ b/logwatch/logwatch.go @@ -313,6 +313,7 @@ func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingC // time.Sleep(500 * time.Millisecond) } case <-done: + fmt.Printf("DONE") return } } @@ -329,7 +330,13 @@ func (m *LogWatch) GetConsumers() map[string]*ContainerLogConsumer { func (m *LogWatch) Shutdown(context context.Context) error { var err error for _, c := range m.consumers { - c.Stop() + if stopErr := c.Stop(); stopErr != nil { + m.log.Error(). + Err(stopErr). + Str("Name", c.name). 
+ Msg("Failed to stop container") + err = stopErr + } discErr := m.DisconnectContainer(c.container) if discErr != nil { @@ -383,14 +390,20 @@ func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) { } } -func (g *ContainerLogConsumer) Stop() { +func (g *ContainerLogConsumer) Stop() error { if g.isDone { - return + return nil } g.isDone = true g.logListeningDone <- struct{}{} defer close(g.logListeningDone) + + if g.tempFile != nil { + return g.tempFile.Close() + } + + return nil } // DisconnectContainer disconnects the particular container @@ -403,10 +416,6 @@ func (m *LogWatch) DisconnectContainer(container LogProducingContainer) error { return nil } -var noOpConsumerFn = func(consumer *ContainerLogConsumer) error { - return nil -} - // ContainerLogs return all logs for the particular container func (m *LogWatch) ContainerLogs(name string) ([]string, error) { logs := []string{} @@ -417,7 +426,7 @@ func (m *LogWatch) ContainerLogs(name string) ([]string, error) { return nil } - err := m.getAllLogsAndExecute(noOpConsumerFn, getLogsFn, noOpConsumerFn) + err := m.GetAllLogsAndConsume(NoOpConsumerFn, getLogsFn) if err != nil { return []string{}, err } @@ -425,11 +434,17 @@ func (m *LogWatch) ContainerLogs(name string) ([]string, error) { return logs, err } -func (m *LogWatch) getAllLogsAndExecute(preExecuteFn func(consumer *ContainerLogConsumer) error, executeFn func(consumer *ContainerLogConsumer, log LogContent) error, cleanUpFn func(consumer *ContainerLogConsumer) error) error { +type ConsumerConsumingFn = func(consumer *ContainerLogConsumer) error +type ConsumerLogConsumingFn = func(consumer *ContainerLogConsumer, log LogContent) error + +func NoOpConsumerFn(consumer *ContainerLogConsumer) error { + return nil +} + +func (m *LogWatch) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consumeLogFn ConsumerLogConsumingFn) (loopErr error) { m.acceptMutex.Lock() defer m.acceptMutex.Unlock() - var loopErr error var attachError = func(err error) { if err == nil { return @@ -448,7 +463,8 @@ func (m *LogWatch) getAllLogsAndExecute(preExecuteFn func(consumer *ContainerLog } if consumer.tempFile == nil { - return errors.Errorf("temp file is nil for container %s, this should never happen", consumer.name) + attachError(errors.Errorf("temp file is nil for container %s, this should never happen", consumer.name)) + return } preExecuteErr := preExecuteFn(consumer) @@ -458,20 +474,23 @@ func (m *LogWatch) getAllLogsAndExecute(preExecuteFn func(consumer *ContainerLog Str("Container", consumer.name). Msg("Failed to run pre-execute function") attachError(preExecuteErr) - break + continue } - // set the cursor to the end of the file, when done to resume writing + // set the cursor to the end of the file, when done to resume writing, unless it was closed //revive:disable defer func() { - _, deferErr := consumer.tempFile.Seek(0, 2) - attachError(deferErr) + if !consumer.isDone { + _, deferErr := consumer.tempFile.Seek(0, 2) + attachError(deferErr) + } }() //revive:enable - _, err := consumer.tempFile.Seek(0, 0) - if err != nil { - return err + _, seekErr := consumer.tempFile.Seek(0, 0) + if seekErr != nil { + attachError(seekErr) + return } decoder := gob.NewDecoder(consumer.tempFile) @@ -484,13 +503,13 @@ func (m *LogWatch) getAllLogsAndExecute(preExecuteFn func(consumer *ContainerLog decodeErr := decoder.Decode(&log) if decodeErr == nil { counter++ - executeErr := executeFn(consumer, log) - if executeErr != nil { + consumeErr := consumeLogFn(consumer, log) + if consumeErr != nil { m.log.Error(). 
- Err(executeErr). + Err(consumeErr). Str("Container", consumer.name). - Msg("Failed to run execute function") - attachError(preExecuteErr) + Msg("Failed to consume log") + attachError(consumeErr) break LOG_LOOP } } else if errors.Is(decodeErr, io.EOF) { @@ -500,21 +519,17 @@ func (m *LogWatch) getAllLogsAndExecute(preExecuteFn func(consumer *ContainerLog Msg("Finished getting logs") break } else { - return decodeErr + m.log.Error(). + Err(decodeErr). + Str("Container", consumer.name). + Msg("Failed to decode log") + attachError(decodeErr) + return } } - - c := consumer - - // done on purpose - //revive:disable - defer func() { - attachError(cleanUpFn(c)) - }() - //revive:enable } - return loopErr + return } // FlushLogsToTargets flushes all logs for all consumers (containers) to their targets @@ -541,22 +556,16 @@ func (m *LogWatch) FlushLogsToTargets() error { Str("Container", consumer.name). Str("log target", string(logTarget)). Msg("No handler found for log target. Aborting") + + return errors.Errorf("no handler found for log target: %s", logTarget) } } return nil } - var closeTempFileFn = func(consumer *ContainerLogConsumer) error { - if consumer.tempFile == nil { - return errors.Errorf("temp file is nil for container %s, this should never happen", consumer.name) - } - - return consumer.tempFile.Close() - } - - flushErr := m.getAllLogsAndExecute(preExecuteFn, flushLogsFn, closeTempFileFn) - if flushErr != nil { + flushErr := m.GetAllLogsAndConsume(preExecuteFn, flushLogsFn) + if flushErr == nil { m.log.Info(). Msg("Finished flushing logs") } else { @@ -677,10 +686,11 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { } if g.tempFile == nil || g.encoder == nil { - g.hasErrored = true g.lw.log.Error(). Str("Container", g.name). Msg("temp file or encoder is nil, consumer cannot work, this should never happen") + g.MarkAsErrored() + return } @@ -688,22 +698,13 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { Ts string `json:"ts"` } - if err := json.Unmarshal(l.Content, &logMsg); err != nil { - g.lw.log.Error(). - Str("Container", g.name). - Msg("failed to unmarshal log message for container") - } - - maybeFirstTs, err := time.Parse(time.RFC3339, logMsg.Ts) - if err != nil { - g.lw.log.Error(). - Str("Container", g.name). - Str("Timestamp", logMsg.Ts). 
- Msg("failed to parse first log message's timestamp ") - } - - if maybeFirstTs.Before(g.firstLogTs) { - g.firstLogTs = maybeFirstTs + // if we cannot unmarshal it, ignore it + if err := json.Unmarshal(l.Content, &logMsg); err == nil { + maybeFirstTs, err := time.Parse(time.RFC3339, logMsg.Ts) + // if it's not a valid timestamp, ignore it + if err == nil && maybeFirstTs.Before(g.firstLogTs) { + g.firstLogTs = maybeFirstTs + } } content := LogContent{ diff --git a/logwatch/logwatch_test.go b/logwatch/logwatch_test.go index 48100dbd7..76d18eb50 100644 --- a/logwatch/logwatch_test.go +++ b/logwatch/logwatch_test.go @@ -167,7 +167,8 @@ func TestLogWatchDocker(t *testing.T) { // this code terminates the containers properly for _, c := range lw.GetConsumers() { if !tc.exitEarly { - c.Stop() + stopErr := c.Stop() + require.NoError(t, stopErr, "should not fail to stop log producer") if err := lw.DisconnectContainer(c.GetContainer()); err != nil { t.Fatalf("failed to disconnect container: %s", err.Error()) } @@ -202,7 +203,7 @@ func TestLogWatchConnectWithDelayDocker(t *testing.T) { err = lw.ConnectContainer(context.Background(), container, name) require.NoError(t, err) - time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second) + time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 10*time.Second) logs, err := lw.ContainerLogs(containerName) require.NoError(t, err, "should not fail to get logs") @@ -219,6 +220,171 @@ func TestLogWatchConnectWithDelayDocker(t *testing.T) { }) } +func TestLogWatch_GetAllLogs_ErrorsAfterFiveLogs(t *testing.T) { + t.Parallel() + ctx := testcontext.Get(t) + containerName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString()) + message := `{"log":"message", "ts": "2021-01-01T00:00:00.000Z"}` + interval := float64(1) + amount := 10 + + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) + require.NoError(t, err) + container, err := startTestContainer(ctx, containerName, message, amount, interval, false) + require.NoError(t, err) + name, err := container.Name(ctx) + require.NoError(t, err) + + err = lw.ConnectContainer(context.Background(), container, name) + require.NoError(t, err) + + time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 10*time.Second) + + expectedErrorText := "execute test error" + + count := 0 + logsProcessed := []string{} + var testFn = func(consumer *logwatch.ContainerLogConsumer, log logwatch.LogContent) error { + if count < 5 { + logsProcessed = append(logsProcessed, string(log.Content)) + count++ + return nil + } + + return errors.New(expectedErrorText) + } + + err = lw.GetAllLogsAndConsume(logwatch.NoOpConsumerFn, testFn) + require.Error(t, err, "should fail to get all logs") + require.Equal(t, err.Error(), expectedErrorText, "should fail with test error") + require.Equal(t, 5, len(logsProcessed), "should process 5 logs") + + t.Cleanup(func() { + if err := lw.Shutdown(ctx); err != nil { + t.Fatalf("failed to shutodwn logwatch: %s", err.Error()) + } + if err := container.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate container: %s", err.Error()) + } + }) +} + +func TestLogWatch_GetAllLogs_TwoConsumers_FirstErrorsAfterFiveLogs(t *testing.T) { + t.Parallel() + ctx := testcontext.Get(t) + containerName_1 := fmt.Sprintf("container-%s", uuid.NewString()) + containerName_2 := fmt.Sprintf("container-%s", uuid.NewString()) + message := `{"log":"message", "ts": "2021-01-01T00:00:00.000Z"}` + interval := float64(1) + amount := 10 + + lw, err := 
logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) + require.NoError(t, err) + container_1, err := startTestContainer(ctx, containerName_1, message, amount, interval, false) + require.NoError(t, err) + + err = lw.ConnectContainer(context.Background(), container_1, containerName_1) + require.NoError(t, err) + + container_2, err := startTestContainer(ctx, containerName_2, message, amount, interval, false) + require.NoError(t, err) + + err = lw.ConnectContainer(context.Background(), container_2, containerName_2) + require.NoError(t, err) + + time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 10*time.Second) + + expectedErrorText := "execute test error" + + count := 0 + logsProcessed := map[string][]string{} + var testFn = func(consumer *logwatch.ContainerLogConsumer, log logwatch.LogContent) error { + name, _ := consumer.GetContainer().Name(ctx) + name = strings.TrimPrefix(name, "/") + if name == containerName_2 || count < 5 { + if _, ok := logsProcessed[name]; !ok { + logsProcessed[name] = []string{string(log.Content)} + } else { + logsProcessed[name] = append(logsProcessed[name], string(log.Content)) + } + count++ + return nil + } + + return errors.New(expectedErrorText) + } + + err = lw.GetAllLogsAndConsume(logwatch.NoOpConsumerFn, testFn) + require.Error(t, err, "should fail to get all logs") + require.Equal(t, expectedErrorText, err.Error(), "should fail with test error") + require.Equal(t, 5, len(logsProcessed[containerName_1]), "should process 5 logs for first container") + require.Equal(t, 10, len(logsProcessed[containerName_2]), "should process all logs for second container") + + t.Cleanup(func() { + if err := lw.Shutdown(ctx); err != nil { + t.Fatalf("failed to shutdown logwatch: %s", err.Error()) + } + if err := container_1.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate first container: %s", err.Error()) + } + if err := container_2.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate second container: %s", err.Error()) + } + }) +} + +func TestLogWatch_GetAllLogs_ErrorsBeforeConsumption(t *testing.T) { + t.Parallel() + ctx := testcontext.Get(t) + containerName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString()) + message := `{"log":"message", "ts": "2021-01-01T00:00:00.000Z"}` + interval := float64(1) + amount := 10 + + lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory)) + require.NoError(t, err) + container, err := startTestContainer(ctx, containerName, message, amount, interval, false) + require.NoError(t, err) + name, err := container.Name(ctx) + require.NoError(t, err) + + err = lw.ConnectContainer(context.Background(), container, name) + require.NoError(t, err) + + time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 10*time.Second) + + count := 0 + logsProcessed := []string{} + var testFn = func(consumer *logwatch.ContainerLogConsumer, log logwatch.LogContent) error { + if count < 5 { + logsProcessed = append(logsProcessed, string(log.Content)) + count++ + return nil + } + + return errors.New("test error") + } + + expectedErrorText := "pre-execute test error" + var errorConsumerFn = func(consumer *logwatch.ContainerLogConsumer) error { + return errors.New(expectedErrorText) + } + + err = lw.GetAllLogsAndConsume(errorConsumerFn, testFn) + require.Error(t, err, "should fail to get all logs") + require.Equal(t, expectedErrorText, err.Error(), "should fail with test error") + require.Equal(t, 0, len(logsProcessed), "should process zero logs") + + t.Cleanup(func() 
{ + if err := lw.Shutdown(ctx); err != nil { + t.Fatalf("failed to shutdown logwatch: %s", err.Error()) + } + if err := container.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate container: %s", err.Error()) + } + }) +} + func TestLogWatchTwoDockerContainers(t *testing.T) { t.Parallel() ctx := testcontext.Get(t) @@ -243,13 +409,14 @@ func TestLogWatchTwoDockerContainers(t *testing.T) { err = lw.ConnectContainer(context.Background(), containerTwo, containerTwoName) require.NoError(t, err, "log watch should connect to container") - time.Sleep(time.Duration(int(interval*float64(amountFirst)))*time.Second + 5*time.Second) + time.Sleep(time.Duration(int(interval*float64(amountSecond)))*time.Second + 5*time.Second) for _, c := range lw.GetConsumers() { name, err := c.GetContainer().Name(ctx) require.NoError(t, err, "should not fail to get container name") if name == containerOneName { - c.Stop() + stopErr := c.Stop() + require.NoError(t, stopErr, "should not fail to stop log producer") err = lw.DisconnectContainer(containerOne) require.NoError(t, err, "log watch should disconnect from container") } diff --git a/logwatch/logwatch_user_test.go b/logwatch/logwatch_user_test.go index f1c9856bc..84cff2224 100644 --- a/logwatch/logwatch_user_test.go +++ b/logwatch/logwatch_user_test.go @@ -105,6 +105,9 @@ func TestFileLoggingTarget(t *testing.T) { require.True(t, bytes.Contains(content, A), "A should be present in log file") require.True(t, bytes.Contains(content, B), "B should be present in log file") require.True(t, bytes.Contains(content, C), "C should be present in log file") + + err = lw.Shutdown(ctx) + require.NoError(t, err, "failed to shutdown logwatch") } type MockedLogHandler struct { @@ -161,6 +164,9 @@ func TestMultipleMockedLoggingTargets(t *testing.T) { assertMockedHandlerHasLogs(t, mockedFileHandler) assertMockedHandlerHasLogs(t, mockedLokiHanlder) + + err = lw.Shutdown(ctx) + require.NoError(t, err, "failed to shutdown logwatch") } func TestOneMockedLoggingTarget(t *testing.T) { @@ -186,6 +192,9 @@ func TestOneMockedLoggingTarget(t *testing.T) { require.NoError(t, err, "failed to flush logs to targets") assertMockedHandlerHasLogs(t, mockedLokiHanlder) + + err = lw.Shutdown(ctx) + require.NoError(t, err, "failed to shutdown logwatch") } func assertMockedHandlerHasLogs(t *testing.T, handler *MockedLogHandler) { From 091b6c21fc34ab1ea30e3e557e141ea0cc13b9dc Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 5 Dec 2023 08:39:50 -0300 Subject: [PATCH 27/40] fix variable naming --- logwatch/logwatch_handlers.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/logwatch/logwatch_handlers.go b/logwatch/logwatch_handlers.go index d672d67c4..1ec027bb5 100644 --- a/logwatch/logwatch_handlers.go +++ b/logwatch/logwatch_handlers.go @@ -95,8 +95,8 @@ func (h FileLogHandler) GetTarget() LogTarget { return File } -func (h *FileLogHandler) SetRunId(executionId string) { - h.runId = executionId +func (h *FileLogHandler) SetRunId(runId string) { + h.runId = runId } func (h *FileLogHandler) GetRunId() string { @@ -181,8 +181,8 @@ func (h LokiLogHandler) GetTarget() LogTarget { return Loki } -func (h *LokiLogHandler) SetRunId(executionId string) { - h.runId = executionId +func (h *LokiLogHandler) SetRunId(runId string) { + h.runId = runId } func (h *LokiLogHandler) GetRunId() string { @@ -217,8 +217,8 @@ func (h InMemoryLogHandler) GetTarget() LogTarget { return InMemory } -func (h *InMemoryLogHandler) SetRunId(executionId string) { - h.runId = executionId +func 
(h *InMemoryLogHandler) SetRunId(runId string) { + h.runId = runId } func (h *InMemoryLogHandler) GetRunId() string { From 533fde34b356e3af202d49f2e28da9b0664bc977 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 5 Dec 2023 15:34:07 -0300 Subject: [PATCH 28/40] added logwatch info to readme.md --- README.md | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/README.md b/README.md index 77525c748..53d784695 100644 --- a/README.md +++ b/README.md @@ -93,3 +93,77 @@ Modified packages or new packages get added and pushed to the `gh-pages` branch Removed charts do not trigger a re-publish, the packages have to be removed and the index file regenerated in the `gh-pages` branch of the [qa-charts](https://github.com/smartcontractkit/qa-charts) repository. Note: The qa-charts repository is scheduled to look for changes to the charts once every hour. This can be expedited by going to that repo and running the cd action via github UI. + +# Using LogWatch + +LogWatch is a package that allows you to connect to a Docker container and then flush logs to configured targets. Currently 3 targets are supported: +* `file` - saves logs to a file in `./logs` folder +* `loki` - sends logs to Loki +* `in-memory` - stores logs in memory + +It can be configured to use multiple targets at once. If no target is specified, it becomes a no-op. + +Targets can be set in two ways: +* using `LOGWATCH_LOG_TARGETS` environment variable, e.g. `Loki,in-MemOry` (case insensitive) +* using programmatic functional option `WithLogTarget()` + +The functional option has higher priority than the environment variable. + +When you connect a container LogWatch will create a new consumer and start a detached goroutine that listens to logs emitted by that container and reconnects and re-requests logs if listening fails for whatever reason. Retry limit and timeout can both be configured using functional options. In most cases one container should have one consumer, but it's possible to have multiple consumers for one container. + +LogWatch stores all logs in a gob temporary file. To actually send/save them, you need to flush them. When you do it, LogWatch will decode the file and send logs to configured targets. If log handling results in an error it won't be retried and processing of logs for a given consumer will stop (if you think we should add a retry mechanism please let us know). + +*Important:* Flushing and accepting logs is a blocking operation. That's because they both share the same cursor to the temporary file and otherwise its position would be racy and could result in mixed-up logs. + +When using `in-memory` or `file` target no other environment variables are required. When using the `loki` target, the following environment variables are required: +* `LOKI_TENANT_ID` - tenant ID +* `LOKI_URL` - Loki URL to which logs will be pushed +* `LOKI_BASIC_AUTH` + +You can print the log location for each target using this function: `(m *LogWatch) PrintLogTargetsLocations()`. For the `file` target it will print a relative folder path, for `loki` it will print the URL of a Grafana Dashboard scoped to current execution and container ids. For the `in-memory` target it's a no-op. + +It is recommended to shut down LogWatch at the end of your tests. Here's an example: +```go + +t.Cleanup(func() { + l.Warn().Msg("Shutting down logwatch") + + if t.Failed() || os.Getenv("TEST_LOG_COLLECT") == "true" { + // we can't do much if this fails, so we just log the error + _ = logWatch.FlushLogsToTargets() + logWatch.PrintLogTargetsLocations() + logWatch.SaveLogLocationInTestSummary() + } + + // we can't do much if this fails, so we just log the error + _ = logWatch.Shutdown(testcontext.Get(b.t)) +}) +```
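+
+For completeness, here's a minimal end-to-end sketch (a sketch under assumptions: `container` is an already started testcontainers container and `t` is your `*testing.T`; names are illustrative):
+```go
+lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
+if err != nil {
+	t.Fatal(err)
+}
+// starts a log producer for the container and a consumer goroutine that buffers its logs
+if err := lw.ConnectContainer(context.Background(), container, "my-container"); err != nil {
+	t.Fatal(err)
+}
+
+// ... run the actual test ...
+
+// decode the temp gob file and push the logs to all configured targets
+if err := lw.FlushLogsToTargets(); err != nil {
+	t.Log("failed to flush logs to targets")
+}
+```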
+
+It is recommended to shutdown LogWatch at the end of your tests. Here's an example:
+```go
+
+t.Cleanup(func() {
+	l.Warn().Msg("Shutting down logwatch")
+
+	if t.Failed() || os.Getenv("TEST_LOG_COLLECT") == "true" {
+		// we can't do much if this fails, so we just log the error
+		_ = logWatch.FlushLogsToTargets()
+		logWatch.PrintLogTargetsLocations()
+		logWatch.SaveLogLocationInTestSummary()
+	}
+
+	// we can't do much if this fails, so we just log the error
+	_ = logWatch.Shutdown(testcontext.Get(t))
+})
+```
+
+## Grouping test execution
+
+When running tests in CI you're probably interested in grouping logs by test execution, so that you can easily find the logs in Loki. To do that your job should set the `RUN_ID` environment variable. In GHA it's recommended to set it to the workflow id. If that variable is not set, then a run id will be automatically generated and saved in the `.run.id` file, so that it can be shared by tests that are part of the same execution, but are running in different processes.
+
+## Test Summary
+
+In order to facilitate displaying information in GH's step summary the `testsummary` package was added. It exposes a single function: `AddEntry(testName, key string, value interface{})`. When you call it, it either creates a test summary JSON file or appends to it. The result is a map of keys with values.
+
+Example:
+```JSON
+{
+   "file":[
+      {
+         "test_name":"TestOCRv2Basic",
+         "value":"./logs/TestOCRv2Basic-2023-12-01T18-00-59-TestOCRv2Basic-38ac1e52-d0a6-48"
+      }
+   ],
+   "loki":[
+      {
+         "test_name":"TestOCRv2Basic",
+         "value":"https://grafana.ops.prod.cldev.sh/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs?orgId=1\u0026var-run_id=TestOCRv2Basic-38ac1e52-d0a6-48\u0026var-container_id=cl-node-a179ca7d\u0026var-container_id=cl-node-76798f87\u0026var-container_id=cl-node-9ff7c3ae\u0026var-container_id=cl-node-43409b09\u0026var-container_id=cl-node-3b6810bd\u0026var-container_id=cl-node-69fed256\u0026from=1701449851165\u0026to=1701450124925"
+      }
+   ]
+}
+```
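+
+A summary like the one above could be produced by calls along these lines (a sketch; the exact import path is assumed to follow this repository's module layout):
+```go
+import (
+	"testing"
+
+	"github.com/smartcontractkit/chainlink-testing-framework/testsummary"
+)
+
+func saveLogLocation(t *testing.T) {
+	// appends an entry to the test summary JSON file, creating the file on first use
+	if err := testsummary.AddEntry(t.Name(), "file", "./logs/"+t.Name()); err != nil {
+		t.Log(err)
+	}
+}
+```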
+
+In GHA after tests have ended we can use tools like `jq` to extract the information we need and display it in step summary.
\ No newline at end of file

From f0640fccb7d1a8dc2a58a58280ac5b1cc601a716 Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Tue, 5 Dec 2023 15:36:48 -0300
Subject: [PATCH 29/40] rename logwatch to logstream

---
 README.md                                 |  24 +--
 .../logwatch.go => logstream/logstream.go |  56 +++----
 .../logstream_handlers.go                 |   4 +-
 .../logstream_test.go                     | 154 +++++++++---------
 .../logstream_user_loki_test.go           |   8 +-
 .../logstream_user_test.go                |  54 +++---
 6 files changed, 150 insertions(+), 150 deletions(-)
 rename logwatch/logwatch.go => logstream/logstream.go (91%)
 rename logwatch/logwatch_handlers.go => logstream/logstream_handlers.go (99%)
 rename logwatch/logwatch_test.go => logstream/logstream_test.go (81%)
 rename logwatch/logwatch_user_loki_test.go => logstream/logstream_user_loki_test.go (86%)
 rename logwatch/logwatch_user_test.go => logstream/logstream_user_test.go (76%)

diff --git a/README.md b/README.md
index 53d784695..5744fb4e3 100644
--- a/README.md
+++ b/README.md
@@ -94,9 +94,9 @@ Removed charts do not trigger a re-publish, the packages have to be removed and
 
 Note: The qa-charts repository is scheduled to look for changes to the charts once every hour. This can be expedited by going to that repo and running the cd action via github UI.
 
-# Using LogWatch
+# Using LogStream
 
-LogWatch is a package that allows you to connect to a Docker container and then flush its logs to configured targets. Currently 3 targets are supported:
+LogStream is a package that allows you to connect to a Docker container and then flush its logs to configured targets. Currently 3 targets are supported:
 * `file` - saves logs to a file in `./logs` folder
 * `loki` - sends logs to Loki
 * `in-memory` - stores logs in memory
@@ -104,14 +104,14 @@ It can be configured to use multiple targets at once. If no target is specified
 
 Targets can be set in two ways:
-* using `LOGWATCH_LOG_TARGETS` environment variable, e.g. `Loki,in-MemOry` (case insensitive)
+* using `LOGSTREAM_LOG_TARGETS` environment variable, e.g. `Loki,in-MemOry` (case insensitive)
 * using programmatic functional option `WithLogTarget()`
 
 Functional option has higher priority than environment variable.
 
-When you connect a container, LogWatch will create a new consumer and start a detached goroutine that listens to logs emitted by that container and reconnects and re-requests logs if listening fails for whatever reason. Retry limit and timeout can both be configured using functional options. In most cases one container should have one consumer, but it's possible to have multiple consumers for one container.
+When you connect a container, LogStream will create a new consumer and start a detached goroutine that listens to logs emitted by that container and reconnects and re-requests logs if listening fails for whatever reason. Retry limit and timeout can both be configured using functional options. In most cases one container should have one consumer, but it's possible to have multiple consumers for one container.
 
-LogWatch stores all logs in a temporary gob file. To actually send/save them, you need to flush them. When you do, LogWatch will decode the file and send logs to configured targets. If log handling results in an error it won't be retried and processing of logs for the given consumer will stop (if you think we should add a retry mechanism please let us know).
+LogStream stores all logs in a temporary gob file. To actually send/save them, you need to flush them. When you do, LogStream will decode the file and send logs to configured targets. If log handling results in an error it won't be retried and processing of logs for the given consumer will stop (if you think we should add a retry mechanism please let us know).
 
 *Important:* Flushing and accepting logs is a blocking operation. That's because they both share the same cursor to the temporary file; otherwise its position would be racy and could result in mixed-up logs.
 
@@ -120,23 +120,23 @@ When using `in-memory` or `file` target no other environment variables are requi
 * `LOKI_URL` - Loki URL to which logs will be pushed
 * `LOKI_BASIC_AUTH`
 
-You can print the log location for each target using this function: `(m *LogWatch) PrintLogTargetsLocations()`. For the `file` target it will print a relative folder path, for `loki` it will print the URL of a Grafana dashboard scoped to the current execution and container ids. For the `in-memory` target it's a no-op.
+You can print the log location for each target using this function: `(m *LogStream) PrintLogTargetsLocations()`. For the `file` target it will print a relative folder path, for `loki` it will print the URL of a Grafana dashboard scoped to the current execution and container ids. For the `in-memory` target it's a no-op.
 
-It is recommended to shutdown LogWatch at the end of your tests. Here's an example:
+It is recommended to shutdown LogStream at the end of your tests. Here's an example:
 ```go
 
 t.Cleanup(func() {
-	l.Warn().Msg("Shutting down logwatch")
+	l.Warn().Msg("Shutting down Log Stream")
 
 	if t.Failed() || os.Getenv("TEST_LOG_COLLECT") == "true" {
 		// we can't do much if this fails, so we just log the error
-		_ = logWatch.FlushLogsToTargets()
-		logWatch.PrintLogTargetsLocations()
-		logWatch.SaveLogLocationInTestSummary()
+		_ = logStream.FlushLogsToTargets()
+		logStream.PrintLogTargetsLocations()
+		logStream.SaveLogLocationInTestSummary()
 	}
 
 	// we can't do much if this fails, so we just log the error
-	_ = logWatch.Shutdown(testcontext.Get(t))
+	_ = logStream.Shutdown(testcontext.Get(t))
 })
 ```
diff --git a/logwatch/logwatch.go b/logstream/logstream.go
similarity index 91%
rename from logwatch/logwatch.go
rename to logstream/logstream.go
index 012c87c28..32f785076 100644
--- a/logwatch/logwatch.go
+++ b/logstream/logstream.go
@@ -1,4 +1,4 @@
-package logwatch
+package logstream
 
 import (
 	"context"
@@ -45,9 +45,9 @@ type LogProducingContainer interface {
 	Terminate(context.Context) error
 }
 
-// LogWatch is a test helper struct to monitor docker container logs for some patterns
+// LogStream is a test helper struct to monitor docker container logs for some patterns
 // and push their logs into Loki for further analysis
-type LogWatch struct {
+type LogStream struct {
 	testName string
 	log      zerolog.Logger
 	loki     *wasp.LokiClient
@@ -68,11 +68,11 @@ type LogContent struct {
 	Time time.Time
 }
 
-type Option func(*LogWatch)
+type Option func(*LogStream)
 
-// NewLogWatch creates a new LogWatch instance, with Loki client only if Loki log target is enabled (lazy init)
-func NewLogWatch(t *testing.T, patterns map[string][]*regexp.Regexp, options ...Option) (*LogWatch, error) {
-	l := logging.GetLogger(nil, "LOGWATCH_LOG_LEVEL").With().Str("Component", "LogWatch").Logger()
+// NewLogStream creates a new LogStream instance, with Loki client only if Loki log target is enabled (lazy init)
+func NewLogStream(t *testing.T, patterns map[string][]*regexp.Regexp, options ...Option) (*LogStream, error) {
+	l := logging.GetLogger(nil, "LOGWATCH_LOG_LEVEL").With().Str("Component", "LogStream").Logger()
 	var testName string
 	if t == nil {
 		testName = NO_TEST
@@ -90,7 +90,7 @@ func NewLogStream(t *testing.T, patterns map[string][]*regexp.Regexp, options ...
 		return nil, err
 	}
 
-	logWatch := &LogWatch{
+	logWatch := &LogStream{
 		testName:  testName,
 		log:       l,
 		consumers: make(map[string]*ContainerLogConsumer, 0),
@@ -113,12 +113,12 @@ func NewLogStream(t *testing.T, patterns map[string][]*regexp.Regexp, options ...
 		handler.SetRunId(logWatch.runId)
 	}
 
-	l.Info().Str("Run_id", logWatch.runId).Msg("LogWatch initialized")
+	l.Info().Str("Run_id", logWatch.runId).Msg("LogStream initialized")
 
 	return logWatch, nil
 }
 
-func (m *LogWatch) validateLogTargets() error {
+func (m *LogStream) validateLogTargets() error {
 	// check if all requested log targets are supported
 	for _, wantedTarget := range m.enabledLogTargets {
 		found := false
@@ -150,32 +150,32 @@ func (m *LogStream) validateLogTargets() error {
 	}
 
 	if len(m.logTargetHandlers) == 0 {
-		m.log.Warn().Msg("No log targets enabled. LogWatch will not persist any logs")
+		m.log.Warn().Msg("No log targets enabled. LogStream will not persist any logs")
 	}
 
 	return nil
 }
 
 func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) Option {
-	return func(lw *LogWatch) {
+	return func(lw *LogStream) {
 		lw.logTargetHandlers[logTarget] = handler
 	}
 }
 
 func WithLogTarget(logTarget LogTarget) Option {
-	return func(lw *LogWatch) {
+	return func(lw *LogStream) {
 		lw.enabledLogTargets = append(lw.enabledLogTargets, logTarget)
 	}
 }
 
 func WithLogProducerTimeout(timeout time.Duration) Option {
-	return func(lw *LogWatch) {
+	return func(lw *LogStream) {
 		lw.logProducerTimeout = timeout
 	}
 }
 
 func WithLogProducerRetryLimit(retryLimit int) Option {
-	return func(lw *LogWatch) {
+	return func(lw *LogStream) {
 		lw.logProducerTimeoutRetryLimit = retryLimit
 	}
 }
@@ -188,7 +188,7 @@ func fibonacci(n int) int {
 }
 
 // ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer
-func (m *LogWatch) ConnectContainer(ctx context.Context, container LogProducingContainer, prefix string) error {
+func (m *LogStream) ConnectContainer(ctx context.Context, container LogProducingContainer, prefix string) error {
 	name, err := container.Name(ctx)
 	if err != nil {
 		return err
 	}
@@ -322,12 +322,12 @@ func (m *LogStream) ConnectContainer(ctx context.Context, container LogProducingC
 	return err
 }
 
-func (m *LogWatch) GetConsumers() map[string]*ContainerLogConsumer {
+func (m *LogStream) GetConsumers() map[string]*ContainerLogConsumer {
 	return m.consumers
 }
 
 // Shutdown disconnects all containers, stops notifications
-func (m *LogWatch) Shutdown(context context.Context) error {
+func (m *LogStream) Shutdown(context context.Context) error {
 	var err error
 	for _, c := range m.consumers {
 		if stopErr := c.Stop(); stopErr != nil {
@@ -362,20 +362,20 @@ func (m *LogStream) Shutdown(context context.Context) error {
 
 type LogWriter = func(testName string, name string, location interface{}) error
 
-func (m *LogWatch) PrintLogTargetsLocations() {
+func (m *LogStream) PrintLogTargetsLocations() {
 	m.SaveLogTargetsLocations(func(testName string, name string, location interface{}) error {
 		m.log.Info().Str("Test", testName).Str("Handler", name).Interface("Location", location).Msg("Log location")
 		return nil
 	})
 }
 
-func (m *LogWatch) SaveLogLocationInTestSummary() {
+func (m *LogStream) SaveLogLocationInTestSummary() {
 	m.SaveLogTargetsLocations(func(testName string, name string, location interface{}) error {
 		return testsummary.AddEntry(testName, name, location)
 	})
 }
 
-func (m *LogWatch) SaveLogTargetsLocations(writer LogWriter) {
+func (m *LogStream) SaveLogTargetsLocations(writer LogWriter) {
 	for _, handler := range m.logTargetHandlers {
 		name := string(handler.GetTarget())
 		location, err := handler.GetLogLocation(m.consumers)
@@ -407,7 +407,7 @@ func (g *ContainerLogConsumer) Stop() error {
 }
 
 // DisconnectContainer disconnects the particular container
-func (m *LogWatch) DisconnectContainer(container LogProducingContainer) error {
+func (m *LogStream) DisconnectContainer(container LogProducingContainer) error {
 	if container.IsRunning() {
 		m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container")
 		return container.StopLogProducer()
@@ -417,7 +417,7 @@ func (m *LogStream) DisconnectContainer(container LogProducingContainer) error {
 }
 
 // ContainerLogs return all logs for the particular container
-func (m *LogWatch) ContainerLogs(name string) ([]string, error) {
+func (m *LogStream) ContainerLogs(name string) ([]string, error) {
 	logs := []string{}
 	var getLogsFn = func(consumer *ContainerLogConsumer, log LogContent) error {
 		if consumer.name == name {
@@ -441,7 +441,7 @@ func NoOpConsumerFn(consumer *ContainerLogConsumer) error {
 	return nil
 }
 
-func (m *LogWatch) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consumeLogFn ConsumerLogConsumingFn) (loopErr error) {
+func (m *LogStream) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consumeLogFn ConsumerLogConsumingFn) (loopErr error) {
 	m.acceptMutex.Lock()
 	defer m.acceptMutex.Unlock()
@@ -533,7 +533,7 @@ func (m *LogStream) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consum
 }
 
 // FlushLogsToTargets flushes all logs for all consumers (containers) to their targets
-func (m *LogWatch) FlushLogsToTargets() error {
+func (m *LogStream) FlushLogsToTargets() error {
 	var preExecuteFn = func(consumer *ContainerLogConsumer) error {
 		// do not accept any new logs
 		consumer.isDone = true
@@ -582,7 +582,7 @@ type ContainerLogConsumer struct {
 	name       string
 	prefix     string
 	logTargets []LogTarget
-	lw         *LogWatch
+	lw         *LogStream
 	tempFile   *os.File
 	encoder    *gob.Encoder
 	isDone     bool
@@ -595,7 +595,7 @@ type ContainerLogConsumer struct {
 // newContainerLogConsumer creates new log consumer for a container that
 // - signal if log line matches the pattern
 // - push all lines to configured log targets
-func newContainerLogConsumer(ctx context.Context, lw *LogWatch, container LogProducingContainer, prefix string, logTargets ...LogTarget) (*ContainerLogConsumer, error) {
+func newContainerLogConsumer(ctx context.Context, lw *LogStream, container LogProducingContainer, prefix string, logTargets ...LogTarget) (*ContainerLogConsumer, error) {
 	containerName, err := container.Name(ctx)
 	if err != nil {
 		return nil, err
@@ -748,7 +748,7 @@ func (g *ContainerLogConsumer) hasLogTarget(logTarget LogTarget) bool {
 }
 
 func getLogTargetsFromEnv() ([]LogTarget, error) {
-	envLogTargetsValue := os.Getenv("LOGWATCH_LOG_TARGETS")
+	envLogTargetsValue := os.Getenv("LOGSTREAM_LOG_TARGETS")
 	if envLogTargetsValue != "" {
 		envLogTargets := make([]LogTarget, 0)
 		for _, target := range strings.Split(envLogTargetsValue, ",") {
diff --git a/logwatch/logwatch_handlers.go b/logstream/logstream_handlers.go
similarity index 99%
rename from logwatch/logwatch_handlers.go
rename to logstream/logstream_handlers.go
index 1ec027bb5..41f7736be 100644
--- a/logwatch/logwatch_handlers.go
+++ b/logstream/logstream_handlers.go
@@ -1,4 +1,4 @@
-package logwatch
+package logstream
 
 import (
 	"fmt"
@@ -128,7 +128,7 @@
 	}
 	// we can notify more than one time if it matches, but we push only once
 	_ = c.lw.loki.Handle(model.LabelSet{
-		"type":         "log_watch",
+		"type":         "log_stream",
 		"test":         model.LabelValue(content.TestName),
 		"container_id": model.LabelValue(content.ContainerName),
 		"run_id":       model.LabelValue(h.runId),
diff --git a/logwatch/logwatch_test.go b/logstream/logstream_test.go
similarity index 81%
rename from logwatch/logwatch_test.go
rename to logstream/logstream_test.go
index 76d18eb50..1db2a722f 100644
--- a/logwatch/logwatch_test.go
+++ b/logstream/logstream_test.go
@@ -1,4 +1,4 @@
-package logwatch_test
+package logstream_test
 
 import (
 	"context"
@@ -14,7 +14,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/testcontainers/testcontainers-go"
 
-	"github.com/smartcontractkit/chainlink-testing-framework/logwatch"
+	"github.com/smartcontractkit/chainlink-testing-framework/logstream"
 	"github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext"
 )
 
@@ -26,7 +26,7 @@ type TestCase struct {
 	name string
 	msgsIntervalSeconds float64
 	exitEarly           bool
 	mustNotifyList      map[string][]*regexp.Regexp
-	expectedNotifications map[string][]*logwatch.LogNotification
+	expectedNotifications map[string][]*logstream.LogNotification
 }
 
 // replaceContainerNamePlaceholders this function is used to replace container names with dynamic values
@@ -82,7 +82,7 @@ func startTestContainer(ctx context.Context, containerName string, msg string, a
 	})
 }
 
-func TestLogWatchDocker(t *testing.T) {
+func TestLogStreamDocker(t *testing.T) {
 	tests := []TestCase{
 		{
 			name: "should read exactly 10 streams (1 container)",
@@ -134,7 +134,7 @@
 			t.Parallel()
 			ctx := testcontext.Get(t)
 			dynamicContainerNames := replaceContainerNamePlaceholders(tc)
-			lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
+			lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.InMemory))
 			require.NoError(t, err)
 
 			for _, cn := range dynamicContainerNames {
@@ -183,15 +183,15 @@
 	}
 }
 
-func TestLogWatchConnectWithDelayDocker(t *testing.T) {
+func TestLogStreamConnectWithDelayDocker(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
-	containerName := fmt.Sprintf("%s-container-%s", "TestLogWatchConnectRetryDocker", uuid.NewString())
+	containerName := fmt.Sprintf("%s-container-%s", "TestLogStreamConnectRetryDocker", uuid.NewString())
 	message := "message"
 	interval := float64(1)
 	amount := 10
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.InMemory))
 	require.NoError(t, err)
 	container, err := startTestContainer(ctx, containerName, message, amount, interval, false)
 	require.NoError(t, err)
@@ -212,7 +212,7 @@
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 		if err := container.Terminate(ctx); err != nil {
 			t.Fatalf("failed to terminate container: %s", err.Error())
@@ -220,7 +220,7 @@
 	})
 }
 
-func TestLogWatch_GetAllLogs_ErrorsAfterFiveLogs(t *testing.T) {
+func TestLogStream_GetAllLogs_ErrorsAfterFiveLogs(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	containerName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString())
@@ -228,7 +228,7 @@
 	interval := float64(1)
 	amount := 10
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.InMemory))
 	require.NoError(t, err)
 	container, err := startTestContainer(ctx, containerName, message, amount, interval, false)
 	require.NoError(t, err)
@@ -244,7 +244,7 @@
 	count := 0
 	logsProcessed := []string{}
 
-	var testFn = func(consumer *logwatch.ContainerLogConsumer, log logwatch.LogContent) error {
+	var testFn = func(consumer *logstream.ContainerLogConsumer, log logstream.LogContent) error {
 		if count < 5 {
 			logsProcessed = append(logsProcessed, string(log.Content))
 			count++
@@ -254,14 +254,14 @@
 		return errors.New(expectedErrorText)
 	}
 
-	err = lw.GetAllLogsAndConsume(logwatch.NoOpConsumerFn, testFn)
+	err = lw.GetAllLogsAndConsume(logstream.NoOpConsumerFn, testFn)
 	require.Error(t, err, "should fail to get all logs")
 	require.Equal(t, err.Error(), expectedErrorText, "should fail with test error")
 	require.Equal(t, 5, len(logsProcessed), "should process 5 logs")
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 		if err := container.Terminate(ctx); err != nil {
 			t.Fatalf("failed to terminate container: %s", err.Error())
@@ -269,7 +269,7 @@
 	})
}

-func TestLogWatch_GetAllLogs_TwoConsumers_FirstErrorsAfterFiveLogs(t *testing.T) {
+func TestLogStream_GetAllLogs_TwoConsumers_FirstErrorsAfterFiveLogs(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	containerName_1 := fmt.Sprintf("container-%s", uuid.NewString())
@@ -278,7 +278,7 @@
 	interval := float64(1)
 	amount := 10
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.InMemory))
 	require.NoError(t, err)
 	container_1, err := startTestContainer(ctx, containerName_1, message, amount, interval, false)
 	require.NoError(t, err)
@@ -298,7 +298,7 @@
 	count := 0
 	logsProcessed := map[string][]string{}
 
-	var testFn = func(consumer *logwatch.ContainerLogConsumer, log logwatch.LogContent) error {
+	var testFn = func(consumer *logstream.ContainerLogConsumer, log logstream.LogContent) error {
 		name, _ := consumer.GetContainer().Name(ctx)
 		name = strings.TrimPrefix(name, "/")
 		if name == containerName_2 || count < 5 {
@@ -314,7 +314,7 @@
 		return errors.New(expectedErrorText)
 	}
 
-	err = lw.GetAllLogsAndConsume(logwatch.NoOpConsumerFn, testFn)
+	err = lw.GetAllLogsAndConsume(logstream.NoOpConsumerFn, testFn)
 	require.Error(t, err, "should fail to get all logs")
 	require.Equal(t, expectedErrorText, err.Error(), "should fail with test error")
 	require.Equal(t, 5, len(logsProcessed[containerName_1]), "should process 5 logs for first container")
@@ -322,7 +322,7 @@
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 		if err := container_1.Terminate(ctx); err != nil {
 			t.Fatalf("failed to terminate first container: %s", err.Error())
@@ -333,7 +333,7 @@
 	})
}

-func TestLogWatch_GetAllLogs_ErrorsBeforeConsumption(t *testing.T) {
+func TestLogStream_GetAllLogs_ErrorsBeforeConsumption(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	containerName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString())
@@ -341,7 +341,7 @@
 	interval := float64(1)
 	amount := 10
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.InMemory))
 	require.NoError(t, err)
 	container, err := startTestContainer(ctx, containerName, message, amount, interval, false)
 	require.NoError(t, err)
@@ -355,7 +355,7 @@ func TestLogWatch_GetAllLogs_ErrorsBeforeConsumption(t *testing.T) {
 	count := 0
 	logsProcessed := []string{}
 
-	var testFn = func(consumer *logwatch.ContainerLogConsumer, log logwatch.LogContent) error {
+	var testFn = func(consumer *logstream.ContainerLogConsumer, log logstream.LogContent) error {
 		if count < 5 {
 			logsProcessed = append(logsProcessed, string(log.Content))
 			count++
@@ -366,7 +366,7 @@
 	}
 
 	expectedErrorText := "pre-execute test error"
-	var errorConsumerFn = func(consumer *logwatch.ContainerLogConsumer) error {
+	var errorConsumerFn = func(consumer *logstream.ContainerLogConsumer) error {
 		return errors.New(expectedErrorText)
 	}
 
@@ -377,7 +377,7 @@
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 		if err := container.Terminate(ctx); err != nil {
 			t.Fatalf("failed to terminate container: %s", err.Error())
@@ -385,7 +385,7 @@
 	})
}

-func TestLogWatchTwoDockerContainers(t *testing.T) {
+func TestLogStreamTwoDockerContainers(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	containerOneName := fmt.Sprintf("%s-container-%s", t.Name(), uuid.NewString())
@@ -395,8 +395,8 @@ func TestLogStreamTwoDockerContainers(t *testing.T) {
 	amountFirst := 10
 	amountSecond := 20
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogTarget(logwatch.InMemory))
-	require.NoError(t, err, "log watch should be created")
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.InMemory))
+	require.NoError(t, err, "logstream should be created")
 
 	containerOne, err := startTestContainer(ctx, containerOneName, message, amountFirst, interval, false)
 	require.NoError(t, err, "should not fail to start container")
@@ -404,10 +404,10 @@ func TestLogStreamTwoDockerContainers(t *testing.T) {
 	require.NoError(t, err, "should not fail to start container")
 
 	err = lw.ConnectContainer(context.Background(), containerOne, containerOneName)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	err = lw.ConnectContainer(context.Background(), containerTwo, containerTwoName)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amountSecond)))*time.Second + 5*time.Second)
 
@@ -418,7 +418,7 @@ func TestLogStreamTwoDockerContainers(t *testing.T) {
 			stopErr := c.Stop()
 			require.NoError(t, stopErr, "should not fail to stop log producer")
 			err = lw.DisconnectContainer(containerOne)
-			require.NoError(t, err, "log watch should disconnect from container")
+			require.NoError(t, err, "logstream should disconnect from container")
 		}
 	}
 
@@ -435,7 +435,7 @@ func TestLogStreamTwoDockerContainers(t *testing.T) {
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 		if err := containerOne.Terminate(ctx); err != nil {
 			t.Fatalf("failed to terminate first container: %s", err.Error())
@@ -531,9 +531,9 @@ func (m *MockedLogProducingContainer) SendLog(msg string) {
 	m.messages = append(m.messages, msg)
 }
 
-// scenario: log watch consumes a log, then the container returns an error, log watch reconnects
-// and consumes logs again. log watch should not miss any logs nor consume any log twice
-func TestLogWatchConnectRetryMockContainer_FailsOnce(t *testing.T) {
+// scenario: logstream consumes a log, then the container returns an error, logstream reconnects
+// and consumes logs again. logstream should not miss any logs nor consume any log twice
+func TestLogStreamConnectRetryMockContainer_FailsOnce(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	uuid := uuid.NewString()
@@ -549,11 +549,11 @@ func TestLogStreamConnectRetryMockContainer_FailsOnce(t *testing.T) {
 		errorChannelError: nil,
 	}
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogTarget(logwatch.InMemory))
-	require.NoError(t, err, "log watch should be created")
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogProducerTimeout(1*time.Second), logstream.WithLogTarget(logstream.InMemory))
+	require.NoError(t, err, "logstream should be created")
 
 	go func() {
-		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		// wait for 1 second, so that logstream has time to consume at least one log before it's stopped
 		time.Sleep(1 * time.Second)
 		mockedContainer.startSleep = 1 * time.Second
 		logs, err := lw.ContainerLogs(mockedContainer.name)
@@ -577,25 +577,25 @@ func TestLogStreamConnectRetryMockContainer_FailsOnce(t *testing.T) {
 	}()
 
 	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 3*time.Second)
 
 	logs, err := lw.ContainerLogs(mockedContainer.name)
 	require.NoError(t, err, "should not fail to get logs")
-	require.EqualValues(t, logs, logsSent, "log watch should receive all logs")
+	require.EqualValues(t, logs, logsSent, "logstream should receive all logs")
 	require.Equal(t, 2, mockedContainer.startCounter, "log producer should be started twice")
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 	})
}

-// scenario: log watch consumes a log, then the container returns an error, log watch reconnects
-// and consumes logs again, then it happens again. log watch should not miss any logs nor consume any log twice
-func TestLogWatchConnectRetryMockContainer_FailsTwice(t *testing.T) {
+// scenario: logstream consumes a log, then the container returns an error, logstream reconnects
+// and consumes logs again, then it happens again. logstream should not miss any logs nor consume any log twice
+func TestLogStreamConnectRetryMockContainer_FailsTwice(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	uuid := uuid.NewString()
@@ -611,11 +611,11 @@ func TestLogStreamConnectRetryMockContainer_FailsTwice(t *testing.T) {
 		errorChannelError: nil,
 	}
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogTarget(logwatch.InMemory))
-	require.NoError(t, err, "log watch should be created")
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogProducerTimeout(1*time.Second), logstream.WithLogTarget(logstream.InMemory))
+	require.NoError(t, err, "logstream should be created")
 
 	go func() {
-		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		// wait for 1 second, so that logstream has time to consume at least one log before it's stopped
 		time.Sleep(1 * time.Second)
 		mockedContainer.startSleep = 1 * time.Second
 		logs, err := lw.ContainerLogs(mockedContainer.name)
@@ -651,26 +651,26 @@ func TestLogStreamConnectRetryMockContainer_FailsTwice(t *testing.T) {
 	}()
 
 	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
 
 	logs, err := lw.ContainerLogs(mockedContainer.name)
 	require.NoError(t, err, "should not fail to get logs")
-	require.EqualValues(t, logs, logsSent, "log watch should receive all logs")
+	require.EqualValues(t, logs, logsSent, "logstream should receive all logs")
 	require.Equal(t, 3, mockedContainer.startCounter, "log producer should be started three times")
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 	})
}

-// scenario: it consumes a log, then the container returns an error, but when log watch tries to reconnect log producer
-// is still running, but finally it stops and log watch reconnects. log watch should not miss any logs nor consume any log twice
-func TestLogWatchConnectRetryMockContainer_FailsFirstRestart(t *testing.T) {
+// scenario: it consumes a log, then the container returns an error, but when logstream tries to reconnect log producer
+// is still running, but finally it stops and logstream reconnects. logstream should not miss any logs nor consume any log twice
+func TestLogStreamConnectRetryMockContainer_FailsFirstRestart(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	uuid := uuid.NewString()
@@ -686,11 +686,11 @@ func TestLogStreamConnectRetryMockContainer_FailsFirstRestart(t *testing.T) {
 		errorChannelError: nil,
 	}
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogTarget(logwatch.InMemory))
-	require.NoError(t, err, "log watch should be created")
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogProducerTimeout(1*time.Second), logstream.WithLogTarget(logstream.InMemory))
+	require.NoError(t, err, "logstream should be created")
 
 	go func() {
-		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		// wait for 1 second, so that logstream has time to consume at least one log before it's stopped
 		time.Sleep(1 * time.Second)
 		mockedContainer.startSleep = 1 * time.Second
 		logs, err := lw.ContainerLogs(mockedContainer.name)
@@ -719,26 +719,26 @@ func TestLogStreamConnectRetryMockContainer_FailsFirstRestart(t *testing.T) {
 	}()
 
 	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
 
 	logs, err := lw.ContainerLogs(mockedContainer.name)
 	require.NoError(t, err, "should not fail to get logs")
-	require.EqualValues(t, logsSent, logs, "log watch should receive all logs")
+	require.EqualValues(t, logsSent, logs, "logstream should receive all logs")
 	require.Equal(t, 3, mockedContainer.startCounter, "log producer should be started three times")
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 	})
}

-// scenario: it consumes a log, then the container returns an error, but when log watch tries to reconnect log producer
-// is still running and log watch never reconnects. log watch should have no logs (we could improve that in the future)
-func TestLogWatchConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) {
+// scenario: it consumes a log, then the container returns an error, but when logstream tries to reconnect log producer
+// is still running and logstream never reconnects. logstream should have no logs (we could improve that in the future)
+func TestLogStreamConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	uuid := uuid.NewString()
@@ -754,11 +754,11 @@ func TestLogStreamConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) {
 		errorChannelError: nil,
 	}
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerRetryLimit(4), logwatch.WithLogTarget(logwatch.InMemory))
-	require.NoError(t, err, "log watch should be created")
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogProducerTimeout(1*time.Second), logstream.WithLogProducerRetryLimit(4), logstream.WithLogTarget(logstream.InMemory))
+	require.NoError(t, err, "logstream should be created")
 
 	go func() {
-		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		// wait for 1 second, so that logstream has time to consume at least one log before it's stopped
 		time.Sleep(6 * time.Second)
 		mockedContainer.startSleep = 1 * time.Second
 		logs, err := lw.ContainerLogs(mockedContainer.name)
@@ -780,26 +780,26 @@ func TestLogStreamConnectRetryMockContainer_AlwaysFailsRestart(t *testing.T) {
 	}()
 
 	err = lw.ConnectContainer(context.Background(), mockedContainer, mockedContainer.name)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amount)))*time.Second + 5*time.Second)
 
 	// no logs are salvaged here: the logs consumed before the error are discarded once restarting keeps failing
 	logs, err := lw.ContainerLogs(mockedContainer.name)
 	require.NoError(t, err, "should not fail to get logs")
-	require.Equal(t, 0, len(logs), "log watch should have no logs")
+	require.Equal(t, 0, len(logs), "logstream should have no logs")
 	require.Equal(t, 5, mockedContainer.startCounter, "log producer should be started five times")
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 	})
}

 // scenario: log listening loops are independent for all containers/consumers and even if one of them stops
 // due to errors, second one continues and receives all logs
-func TestLogWatchConnectRetryTwoMockContainers_FirstAlwaysFailsRestart_SecondWorks(t *testing.T) {
+func TestLogStreamConnectRetryTwoMockContainers_FirstAlwaysFailsRestart_SecondWorks(t *testing.T) {
 	t.Parallel()
 	ctx := testcontext.Get(t)
 	uuid_1 := uuid.NewString()
@@ -826,11 +826,11 @@
 		errorChannelError: nil,
 	}
 
-	lw, err := logwatch.NewLogWatch(t, nil, logwatch.WithLogProducerTimeout(1*time.Second), logwatch.WithLogProducerRetryLimit(4), logwatch.WithLogTarget(logwatch.InMemory))
-	require.NoError(t, err, "log watch should be created")
+	lw, err := logstream.NewLogStream(t, nil, logstream.WithLogProducerTimeout(1*time.Second), logstream.WithLogProducerRetryLimit(4), logstream.WithLogTarget(logstream.InMemory))
+	require.NoError(t, err, "logstream should be created")
 
 	go func() {
-		// wait for 1 second, so that log watch has time to consume at least one log before it's stopped
+		// wait for 1 second, so that logstream has time to consume at least one log before it's stopped
 		time.Sleep(6 * time.Second)
 		mockedContainer_1.startSleep = 1 * time.Second
 		logs, err := lw.ContainerLogs(mockedContainer_1.name)
@@ -862,27 +862,27 @@ func TestLogWatchConnectRetryTwoMockContainers_FirstAlwaysFailsRestart_SecondWor
 	}()
 
 	err = lw.ConnectContainer(context.Background(), mockedContainer_1, mockedContainer_1.name)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	err = lw.ConnectContainer(context.Background(), mockedContainer_2, mockedContainer_2.name)
-	require.NoError(t, err, "log watch should connect to container")
+	require.NoError(t, err, "logstream should connect to container")
 
 	time.Sleep(time.Duration(int(interval*float64(amountSecond)))*time.Second + 5*time.Second)
 
 	logs_1, err := lw.ContainerLogs(mockedContainer_1.name)
 	require.NoError(t, err, "should not fail to get logs")
-	require.Equal(t, 0, len(logs_1), "log watch should have no logs")
+	require.Equal(t, 0, len(logs_1), "logstream should have no logs")
 	require.Equal(t, 5, mockedContainer_1.startCounter, "log producer should be started five times for first container")
 
 	logs_2, err := lw.ContainerLogs(mockedContainer_2.name)
 	require.NoError(t, err, "should not fail to get logs")
-	require.Equal(t, amountSecond, len(logs_2), "log watch should have all logs for second container")
-	require.EqualValues(t, logsSent, logs_2, "log watch had different logs for second container than expected")
+	require.Equal(t, amountSecond, len(logs_2), "logstream should have all logs for second container")
+	require.EqualValues(t, logsSent, logs_2, "logstream had different logs for second container than expected")
 	require.Equal(t, 1, mockedContainer_2.startCounter, "log producer should be started one time for second container")
 
 	t.Cleanup(func() {
 		if err := lw.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shutdown logwatch: %s", err.Error())
+			t.Fatalf("failed to shutdown logstream: %s", err.Error())
 		}
 	})
}
diff --git a/logwatch/logwatch_user_loki_test.go b/logstream/logstream_user_loki_test.go
similarity index 86%
rename from logwatch/logwatch_user_loki_test.go
rename to logstream/logstream_user_loki_test.go
index fb3d9a314..1b4e29455 100644
--- a/logwatch/logwatch_user_loki_test.go
+++ b/logstream/logstream_user_loki_test.go
@@ -1,4 +1,4 @@
-package logwatch_test
+package logstream_test
 
 import (
 	"os"
@@ -7,7 +7,7 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/smartcontractkit/chainlink-testing-framework/logwatch"
+	"github.com/smartcontractkit/chainlink-testing-framework/logstream"
 	"github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext"
 )
 
@@ -15,7 +15,7 @@ import (
 func TestExampleLokiStreaming(t *testing.T) {
 	t.Skip("uncomment and run manually")
 
-	os.Setenv("LOGWATCH_LOG_TARGETS", "loki")
+	os.Setenv("LOGSTREAM_LOG_TARGETS", "loki")
 	tests := []testData{
 		{
 			name: "stream all container logs to Loki, subtest 1",
@@ -45,7 +45,7 @@ func TestExampleLokiStreaming(t *testing.T) {
 			// nolint
 			defer d.Shutdown(ctx)
 			require.NoError(t, err)
-			lw, err := logwatch.NewLogWatch(t, nil)
+			lw, err := logstream.NewLogStream(t, nil)
 			require.NoError(t, err)
 			err = d.ConnectLogs(lw)
 			require.NoError(t, err)
diff --git a/logwatch/logwatch_user_test.go b/logstream/logstream_user_test.go
similarity index 76%
rename from logwatch/logwatch_user_test.go
rename to logstream/logstream_user_test.go
index 84cff2224..e2397fac1 100644
--- a/logwatch/logwatch_user_test.go
+++ b/logstream/logstream_user_test.go
@@ -1,4 +1,4 @@
-package logwatch_test
+package logstream_test
 
 import (
 	"bytes"
@@ -11,7 +11,7 @@ import (
 
 	"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go" - "github.com/smartcontractkit/chainlink-testing-framework/logwatch" + "github.com/smartcontractkit/chainlink-testing-framework/logstream" ) /* These tests are for user-facing API */ @@ -53,7 +53,7 @@ func (m *MyDeployment) Shutdown(ctx context.Context) error { } /* That's what you need to implement to have your logs send to your chosen targets */ -func (m *MyDeployment) ConnectLogs(lw *logwatch.LogWatch) error { +func (m *MyDeployment) ConnectLogs(lw *logstream.LogStream) error { for _, c := range m.containers { if err := lw.ConnectContainer(context.Background(), c, ""); err != nil { return err @@ -77,12 +77,12 @@ func TestFileLoggingTarget(t *testing.T) { // nolint defer d.Shutdown(ctx) require.NoError(t, err) - lw, err := logwatch.NewLogWatch( + lw, err := logstream.NewLogStream( t, nil, - logwatch.WithLogTarget(logwatch.File), + logstream.WithLogTarget(logstream.File), ) - require.NoError(t, err, "failed to create logwatch") + require.NoError(t, err, "failed to create logstream") err = d.ConnectLogs(lw) require.NoError(t, err, "failed to connect logs") @@ -107,25 +107,25 @@ func TestFileLoggingTarget(t *testing.T) { require.True(t, bytes.Contains(content, C), "C should be present in log file") err = lw.Shutdown(ctx) - require.NoError(t, err, "failed to shutdown logwatch") + require.NoError(t, err, "failed to shutdown logstream") } type MockedLogHandler struct { - logs []logwatch.LogContent - Target logwatch.LogTarget + logs []logstream.LogContent + Target logstream.LogTarget executionId string } -func (m *MockedLogHandler) Handle(consumer *logwatch.ContainerLogConsumer, content logwatch.LogContent) error { +func (m *MockedLogHandler) Handle(consumer *logstream.ContainerLogConsumer, content logstream.LogContent) error { m.logs = append(m.logs, content) return nil } -func (m *MockedLogHandler) GetLogLocation(consumers map[string]*logwatch.ContainerLogConsumer) (string, error) { +func (m *MockedLogHandler) GetLogLocation(consumers map[string]*logstream.ContainerLogConsumer) (string, error) { return "", nil } -func (m *MockedLogHandler) GetTarget() logwatch.LogTarget { +func (m *MockedLogHandler) GetTarget() logstream.LogTarget { return m.Target } @@ -144,17 +144,17 @@ func TestMultipleMockedLoggingTargets(t *testing.T) { // nolint defer d.Shutdown(ctx) require.NoError(t, err) - mockedFileHandler := &MockedLogHandler{Target: logwatch.File} - mockedLokiHanlder := &MockedLogHandler{Target: logwatch.Loki} - lw, err := logwatch.NewLogWatch( + mockedFileHandler := &MockedLogHandler{Target: logstream.File} + mockedLokiHanlder := &MockedLogHandler{Target: logstream.Loki} + lw, err := logstream.NewLogStream( t, nil, - logwatch.WithCustomLogHandler(logwatch.File, mockedFileHandler), - logwatch.WithCustomLogHandler(logwatch.Loki, mockedLokiHanlder), - logwatch.WithLogTarget(logwatch.Loki), - logwatch.WithLogTarget(logwatch.File), + logstream.WithCustomLogHandler(logstream.File, mockedFileHandler), + logstream.WithCustomLogHandler(logstream.Loki, mockedLokiHanlder), + logstream.WithLogTarget(logstream.Loki), + logstream.WithLogTarget(logstream.File), ) - require.NoError(t, err, "failed to create logwatch") + require.NoError(t, err, "failed to create logstream") err = d.ConnectLogs(lw) require.NoError(t, err, "failed to connect logs") @@ -166,7 +166,7 @@ func TestMultipleMockedLoggingTargets(t *testing.T) { assertMockedHandlerHasLogs(t, mockedLokiHanlder) err = lw.Shutdown(ctx) - require.NoError(t, err, "failed to shutdown logwatch") + require.NoError(t, 
err, "failed to shutdown logstream") } func TestOneMockedLoggingTarget(t *testing.T) { @@ -176,14 +176,14 @@ func TestOneMockedLoggingTarget(t *testing.T) { // nolint defer d.Shutdown(ctx) require.NoError(t, err) - mockedLokiHanlder := &MockedLogHandler{Target: logwatch.Loki} - lw, err := logwatch.NewLogWatch( + mockedLokiHanlder := &MockedLogHandler{Target: logstream.Loki} + lw, err := logstream.NewLogStream( t, nil, - logwatch.WithCustomLogHandler(logwatch.Loki, mockedLokiHanlder), - logwatch.WithLogTarget(logwatch.Loki), + logstream.WithCustomLogHandler(logstream.Loki, mockedLokiHanlder), + logstream.WithLogTarget(logstream.Loki), ) - require.NoError(t, err, "failed to create logwatch") + require.NoError(t, err, "failed to create logstream") err = d.ConnectLogs(lw) require.NoError(t, err, "failed to connect logs") @@ -194,7 +194,7 @@ func TestOneMockedLoggingTarget(t *testing.T) { assertMockedHandlerHasLogs(t, mockedLokiHanlder) err = lw.Shutdown(ctx) - require.NoError(t, err, "failed to shutdown logwatch") + require.NoError(t, err, "failed to shutdown logstream") } func assertMockedHandlerHasLogs(t *testing.T, handler *MockedLogHandler) { From 786d2aa351ac64dfecfe29a8cd1037de64e30689 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 5 Dec 2023 15:56:47 -0300 Subject: [PATCH 30/40] return loki error from handler --- logstream/logstream_handlers.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/logstream/logstream_handlers.go b/logstream/logstream_handlers.go index 41f7736be..bcf9df243 100644 --- a/logstream/logstream_handlers.go +++ b/logstream/logstream_handlers.go @@ -126,15 +126,15 @@ func (h *LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) err } c.lw.loki = loki } - // we can notify more than one time if it matches, but we push only once - _ = c.lw.loki.Handle(model.LabelSet{ + + err := c.lw.loki.Handle(model.LabelSet{ "type": "log_stream", "test": model.LabelValue(content.TestName), "container_id": model.LabelValue(content.ContainerName), "run_id": model.LabelValue(h.runId), }, content.Time, string(content.Content)) - return nil + return err } func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsumer) (string, error) { From be4e9cdb04af55eee8fb95f947997c2e8fc9b991 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 08:44:09 -0300 Subject: [PATCH 31/40] add go docs --- Makefile | 2 +- logstream/logstream.go | 39 +++++--- logstream/logstream_handler_test.go | 127 ++++++++++++++++++++++++++ logstream/logstream_handlers.go | 6 +- logstream/logstream_test.go | 2 +- logstream/logstream_user_loki_test.go | 4 +- logstream/logstream_user_test.go | 115 ----------------------- 7 files changed, 160 insertions(+), 135 deletions(-) create mode 100644 logstream/logstream_handler_test.go diff --git a/Makefile b/Makefile index 1ce6a9389..d193c33ba 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,7 @@ compile_contracts: python3 ./utils/compile_contracts.py test_unit: install_gotestfmt - go test -json -cover -covermode=count -coverprofile=unit-test-coverage.out ./client ./gauntlet ./testreporters ./docker/test_env ./k8s/config ./logwatch 2>&1 | tee /tmp/gotest.log | gotestfmt + go test -json -cover -covermode=count -coverprofile=unit-test-coverage.out ./client ./gauntlet ./testreporters ./docker/test_env ./k8s/config ./logstream 2>&1 | tee /tmp/gotest.log | gotestfmt ####################### diff --git a/logstream/logstream.go b/logstream/logstream.go index 32f785076..17adb57c6 100644 --- a/logstream/logstream.go 
+++ b/logstream/logstream.go
@@ -34,6 +34,7 @@ type LogNotification struct {
 	Log string
 }
 
+// LogProducingContainer is a facade that needs to be implemented by any container that wants to be connected to LogStream
 type LogProducingContainer interface {
 	Name(ctx context.Context) (string, error)
 	FollowOutput(consumer testcontainers.LogConsumer)
@@ -61,6 +62,7 @@ type LogStream struct {
 	runId string
 }
 
+// LogContent is a representation of a log that will be sent to Loki
 type LogContent struct {
 	TestName      string
 	ContainerName string
@@ -118,8 +120,8 @@ func NewLogStream(t *testing.T, patterns map[string][]*regexp.Regexp, options ...
 	return logWatch, nil
 }
 
+// validateLogTargets validates that all enabled log targets have a handler and disables handlers that are not enabled
 func (m *LogStream) validateLogTargets() error {
-	// check if all requested log targets are supported
 	for _, wantedTarget := range m.enabledLogTargets {
 		found := false
 		for knownTarget := range m.logTargetHandlers {
@@ -134,7 +136,6 @@ func (m *LogStream) validateLogTargets() error {
 		}
 	}
 
-	// deactivate known log targets that are not enabled
 	for knownTarget := range m.logTargetHandlers {
 		wanted := false
 		for _, wantedTarget := range m.enabledLogTargets {
@@ -156,30 +157,35 @@ func (m *LogStream) validateLogTargets() error {
 	return nil
 }
 
+// WithCustomLogHandler allows overriding the default log handler for a particular log target
 func WithCustomLogHandler(logTarget LogTarget, handler HandleLogTarget) Option {
 	return func(lw *LogStream) {
 		lw.logTargetHandlers[logTarget] = handler
 	}
 }
 
+// WithLogTarget allows setting log targets programmatically (also overrides LOGSTREAM_LOG_TARGETS env var)
 func WithLogTarget(logTarget LogTarget) Option {
 	return func(lw *LogStream) {
 		lw.enabledLogTargets = append(lw.enabledLogTargets, logTarget)
 	}
 }
 
+// WithLogProducerTimeout allows overriding the default log producer timeout of 5 seconds
 func WithLogProducerTimeout(timeout time.Duration) Option {
 	return func(lw *LogStream) {
 		lw.logProducerTimeout = timeout
 	}
 }
 
+// WithLogProducerRetryLimit allows overriding the default log producer retry limit of 10
 func WithLogProducerRetryLimit(retryLimit int) Option {
 	return func(lw *LogStream) {
 		lw.logProducerTimeoutRetryLimit = retryLimit
 	}
 }
 
+// fibonacci is a helper function for retrying log producer
 func fibonacci(n int) int {
 	if n <= 1 {
 		return n
@@ -187,7 +193,7 @@ func fibonacci(n int) int {
 	return fibonacci(n-1) + fibonacci(n-2)
 }
 
-// ConnectContainer connects consumer to selected container and starts testcontainers.LogProducer
+// ConnectContainer connects consumer to selected container, starts testcontainers.LogProducer and listens to its failures in a detached goroutine
 func (m *LogStream) ConnectContainer(ctx context.Context, container LogProducingContainer, prefix string) error {
 	name, err := container.Name(ctx)
 	if err != nil {
 		return err
@@ -309,11 +315,8 @@ func (m *LogStream) ConnectContainer(ctx context.Context, container LogProducing
 					return
 				}
-
-				// time.Sleep(500 * time.Millisecond)
 			}
 		case <-done:
-			fmt.Printf("DONE")
 			return
 		}
 	}
@@ -322,11 +325,12 @@ func (m *LogStream) ConnectContainer(ctx context.Context, container LogProducing
 	return err
 }
 
+// GetConsumers returns all consumers
 func (m *LogStream) GetConsumers() map[string]*ContainerLogConsumer {
 	return m.consumers
 }
 
-// Shutdown disconnects all containers, stops notifications
+// Shutdown disconnects all containers and stops all consumers
 func (m *LogStream) Shutdown(context context.Context) error {
 	var err error
 	for _, c := range m.consumers {
 		if stopErr := c.Stop(); stopErr != nil {
@@ -362,6 +366,7 @@ func (m *LogStream) Shutdown(context context.Context) error {
 
 type LogWriter = func(testName string, name string, location interface{}) error
 
+// PrintLogTargetsLocations prints all log target locations to stdout
 func (m *LogStream) PrintLogTargetsLocations() {
 	m.SaveLogTargetsLocations(func(testName string, name string, location interface{}) error {
 		m.log.Info().Str("Test", testName).Str("Handler", name).Interface("Location", location).Msg("Log location")
@@ -369,12 +374,14 @@ func (m *LogStream) PrintLogTargetsLocations() {
 	})
 }
 
+// SaveLogLocationInTestSummary saves all log target locations to the test summary
 func (m *LogStream) SaveLogLocationInTestSummary() {
 	m.SaveLogTargetsLocations(func(testName string, name string, location interface{}) error {
 		return testsummary.AddEntry(testName, name, location)
 	})
 }
 
+// SaveLogTargetsLocations saves all log target locations using the given writer
 func (m *LogStream) SaveLogTargetsLocations(writer LogWriter) {
 	for _, handler := range m.logTargetHandlers {
 		name := string(handler.GetTarget())
@@ -390,6 +397,7 @@ func (m *LogStream) SaveLogTargetsLocations(writer LogWriter) {
 	}
 }
 
+// Stop stops the consumer and closes temp file
 func (g *ContainerLogConsumer) Stop() error {
 	if g.isDone {
 		return nil
@@ -406,7 +414,7 @@ func (g *ContainerLogConsumer) Stop() error {
 	return nil
 }
 
-// DisconnectContainer disconnects the particular container
+// DisconnectContainer disconnects particular container
 func (m *LogStream) DisconnectContainer(container LogProducingContainer) error {
 	if container.IsRunning() {
 		m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container")
@@ -416,7 +424,7 @@ func (m *LogStream) DisconnectContainer(container LogProducingContainer) error {
 	return nil
 }
 
-// ContainerLogs return all logs for the particular container
+// ContainerLogs returns all logs for a particular container
 func (m *LogStream) ContainerLogs(name string) ([]string, error) {
 	logs := []string{}
 	var getLogsFn = func(consumer *ContainerLogConsumer, log LogContent) error {
@@ -437,10 +445,12 @@ func (m *LogStream) ContainerLogs(name string) ([]string, error) {
 type ConsumerConsumingFn = func(consumer *ContainerLogConsumer) error
 type ConsumerLogConsumingFn = func(consumer *ContainerLogConsumer, log LogContent) error
 
+// NoOpConsumerFn is a no-op consumer function
 func NoOpConsumerFn(consumer *ContainerLogConsumer) error {
 	return nil
 }
 
+// GetAllLogsAndConsume gets all logs for all consumers (containers) and consumes them using consumeLogFn
 func (m *LogStream) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consumeLogFn ConsumerLogConsumingFn) (loopErr error) {
 	m.acceptMutex.Lock()
 	defer m.acceptMutex.Unlock()
@@ -592,9 +602,7 @@ type ContainerLogConsumer struct {
 	firstLogTs time.Time
 }
 
-// newContainerLogConsumer creates new log consumer for a container that
-// - signal if log line matches the pattern
-// - push all lines to configured log targets
+// newContainerLogConsumer creates new log consumer for a container that saves logs to a temp file
 func newContainerLogConsumer(ctx context.Context, lw *LogStream, container LogProducingContainer, prefix string, logTargets ...LogTarget) (*ContainerLogConsumer, error) {
 	containerName, err := container.Name(ctx)
 	if err != nil {
@@ -630,10 +638,12 @@ func newContainerLogConsumer(ctx context.Context, lw *LogStream, container LogPr
 	return consumer, nil
 }
 
+// GetStartTime returns the time of the first log line
 func (g *ContainerLogConsumer) GetStartTime() time.Time {
 	return g.firstLogTs
 }
 
+// ResetTempFile resets
the temp file and gob encoder func (g *ContainerLogConsumer) ResetTempFile() error { if g.tempFile != nil { if err := g.tempFile.Close(); err != nil { @@ -652,12 +662,14 @@ func (g *ContainerLogConsumer) ResetTempFile() error { return nil } +// MarkAsErrored marks the consumer as errored (which makes it stop accepting logs) func (g *ContainerLogConsumer) MarkAsErrored() { g.hasErrored = true g.isDone = true close(g.logListeningDone) } +// GetContainer returns the container that this consumer is connected to func (g *ContainerLogConsumer) GetContainer() LogProducingContainer { return g.container } @@ -729,6 +741,7 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { } } +// streamLogToTempFile streams a log line to the temp file func (g *ContainerLogConsumer) streamLogToTempFile(content LogContent) error { if g.encoder == nil { return errors.New("encoder is nil, this should never happen") @@ -737,6 +750,7 @@ func (g *ContainerLogConsumer) streamLogToTempFile(content LogContent) error { return g.encoder.Encode(content) } +// hasLogTarget checks if the consumer has a particular log target func (g *ContainerLogConsumer) hasLogTarget(logTarget LogTarget) bool { for _, lt := range g.logTargets { if lt == logTarget { @@ -747,6 +761,7 @@ func (g *ContainerLogConsumer) hasLogTarget(logTarget LogTarget) bool { return false } +// getLogTargetsFromEnv gets log targets from LOGSTREAM_LOG_TARGETS env var func getLogTargetsFromEnv() ([]LogTarget, error) { envLogTargetsValue := os.Getenv("LOGSTREAM_LOG_TARGETS") if envLogTargetsValue != "" { diff --git a/logstream/logstream_handler_test.go b/logstream/logstream_handler_test.go new file mode 100644 index 000000000..a2633ba99 --- /dev/null +++ b/logstream/logstream_handler_test.go @@ -0,0 +1,127 @@ +package logstream_test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/logstream" +) + +type MockedLogHandler struct { + logs []logstream.LogContent + Target logstream.LogTarget + executionId string +} + +func (m *MockedLogHandler) Handle(consumer *logstream.ContainerLogConsumer, content logstream.LogContent) error { + m.logs = append(m.logs, content) + return nil +} + +func (m *MockedLogHandler) GetLogLocation(consumers map[string]*logstream.ContainerLogConsumer) (string, error) { + return "", nil +} + +func (m *MockedLogHandler) GetTarget() logstream.LogTarget { + return m.Target +} + +func (m *MockedLogHandler) SetRunId(executionId string) { + m.executionId = executionId +} + +func (m *MockedLogHandler) GetRunId() string { + return m.executionId +} + +func TestMultipleMockedLoggingTargets(t *testing.T) { + ctx := context.Background() + testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} + d, err := NewDeployment(ctx, testData) + // nolint + defer d.Shutdown(ctx) + require.NoError(t, err) + mockedFileHandler := &MockedLogHandler{Target: logstream.File} + mockedLokiHandler := &MockedLogHandler{Target: logstream.Loki} + lw, err := logstream.NewLogStream( + t, + nil, + logstream.WithCustomLogHandler(logstream.File, mockedFileHandler), + logstream.WithCustomLogHandler(logstream.Loki, mockedLokiHandler), + logstream.WithLogTarget(logstream.Loki), + logstream.WithLogTarget(logstream.File), + ) + require.NoError(t, err, "failed to create logstream") + err = d.ConnectLogs(lw) + require.NoError(t, err, "failed to connect logs") + + time.Sleep(2 * time.Second) + err = lw.FlushLogsToTargets() + require.NoError(t, err, "failed to 
flush logs to targets") + + assertMockedHandlerHasLogs(t, mockedFileHandler) + assertMockedHandlerHasLogs(t, mockedLokiHanlder) + + err = lw.Shutdown(ctx) + require.NoError(t, err, "failed to shutdown logstream") +} + +func TestOneMockedLoggingTarget(t *testing.T) { + ctx := context.Background() + testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} + d, err := NewDeployment(ctx, testData) + // nolint + defer d.Shutdown(ctx) + require.NoError(t, err) + mockedLokiHanlder := &MockedLogHandler{Target: logstream.Loki} + lw, err := logstream.NewLogStream( + t, + nil, + logstream.WithCustomLogHandler(logstream.Loki, mockedLokiHanlder), + logstream.WithLogTarget(logstream.Loki), + ) + require.NoError(t, err, "failed to create logstream") + err = d.ConnectLogs(lw) + require.NoError(t, err, "failed to connect logs") + + time.Sleep(2 * time.Second) + err = lw.FlushLogsToTargets() + require.NoError(t, err, "failed to flush logs to targets") + + assertMockedHandlerHasLogs(t, mockedLokiHanlder) + + err = lw.Shutdown(ctx) + require.NoError(t, err, "failed to shutdown logstream") +} + +func assertMockedHandlerHasLogs(t *testing.T, handler *MockedLogHandler) { + matches := make(map[string]int) + matches["A"] = 0 + matches["B"] = 0 + matches["C"] = 0 + + for _, log := range handler.logs { + require.Equal(t, log.TestName, t.Name()) + require.Equal(t, log.ContainerName, "container-0") + + if bytes.Equal(log.Content, A) { + matches["A"]++ + } + + if bytes.Equal(log.Content, B) { + matches["B"]++ + } + + if bytes.Equal(log.Content, C) { + matches["C"]++ + } + } + + require.Greater(t, matches["A"], 0, "A should be present at least once in handler for %s", handler.Target) + require.Greater(t, matches["B"], 0, "B should be matched at least once in handler for %s", handler.Target) + require.Greater(t, matches["C"], 0, "C should be matched at least once in handler for %s", handler.Target) +} diff --git a/logstream/logstream_handlers.go b/logstream/logstream_handlers.go index bcf9df243..af6e0560d 100644 --- a/logstream/logstream_handlers.go +++ b/logstream/logstream_handlers.go @@ -37,7 +37,7 @@ func getDefaultLogHandlers() map[LogTarget]HandleLogTarget { return handlers } -// streams logs to local files +// FileLogHandler saves logs to local files type FileLogHandler struct { logFolder string shouldSkipLogging bool @@ -103,7 +103,7 @@ func (h *FileLogHandler) GetRunId() string { return h.runId } -// streams logs to Loki +// LokiLogHandler sends logs to Loki type LokiLogHandler struct { grafanaUrl string shouldSkipLogging bool @@ -189,7 +189,7 @@ func (h *LokiLogHandler) GetRunId() string { return h.runId } -// stores logs in memory +// InMemoryLogHandler stores logs in memory type InMemoryLogHandler struct { logs map[string][]LogContent runId string diff --git a/logstream/logstream_test.go b/logstream/logstream_test.go index 1db2a722f..3878c1d40 100644 --- a/logstream/logstream_test.go +++ b/logstream/logstream_test.go @@ -703,7 +703,7 @@ func TestLogStreamConnectRetryMockContainer_FailsFirstRestart(t *testing.T) { mockedContainer.startError = errors.New("still running") // wait for one second before clearing errors, so that we retry to connect - time.Sleep(1 * time.Second) + time.Sleep(600 * time.Millisecond) mockedContainer.startError = nil mockedContainer.errorChannelError = nil }() diff --git a/logstream/logstream_user_loki_test.go b/logstream/logstream_user_loki_test.go index 1b4e29455..2105fa080 100644 --- a/logstream/logstream_user_loki_test.go +++ 
b/logstream/logstream_user_loki_test.go @@ -1,7 +1,6 @@ package logstream_test import ( - "os" "testing" "time" @@ -15,7 +14,6 @@ import ( func TestExampleLokiStreaming(t *testing.T) { t.Skip("uncomment and run manually") - os.Setenv("LOGSTREAM_LOG_TARGETS", "loki") tests := []testData{ { name: "stream all container logs to Loki, subtest 1", @@ -45,7 +43,7 @@ func TestExampleLokiStreaming(t *testing.T) { // nolint defer d.Shutdown(ctx) require.NoError(t, err) - lw, err := logstream.NewLogStream(t, nil) + lw, err := logstream.NewLogStream(t, nil, logstream.WithLogTarget(logstream.Loki)) require.NoError(t, err) err = d.ConnectLogs(lw) require.NoError(t, err) diff --git a/logstream/logstream_user_test.go b/logstream/logstream_user_test.go index e2397fac1..f7d4f257f 100644 --- a/logstream/logstream_user_test.go +++ b/logstream/logstream_user_test.go @@ -109,118 +109,3 @@ func TestFileLoggingTarget(t *testing.T) { err = lw.Shutdown(ctx) require.NoError(t, err, "failed to shutdown logstream") } - -type MockedLogHandler struct { - logs []logstream.LogContent - Target logstream.LogTarget - executionId string -} - -func (m *MockedLogHandler) Handle(consumer *logstream.ContainerLogConsumer, content logstream.LogContent) error { - m.logs = append(m.logs, content) - return nil -} - -func (m *MockedLogHandler) GetLogLocation(consumers map[string]*logstream.ContainerLogConsumer) (string, error) { - return "", nil -} - -func (m *MockedLogHandler) GetTarget() logstream.LogTarget { - return m.Target -} - -func (m *MockedLogHandler) SetRunId(executionId string) { - m.executionId = executionId -} - -func (m *MockedLogHandler) GetRunId() string { - return m.executionId -} - -func TestMultipleMockedLoggingTargets(t *testing.T) { - ctx := context.Background() - testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} - d, err := NewDeployment(ctx, testData) - // nolint - defer d.Shutdown(ctx) - require.NoError(t, err) - mockedFileHandler := &MockedLogHandler{Target: logstream.File} - mockedLokiHanlder := &MockedLogHandler{Target: logstream.Loki} - lw, err := logstream.NewLogStream( - t, - nil, - logstream.WithCustomLogHandler(logstream.File, mockedFileHandler), - logstream.WithCustomLogHandler(logstream.Loki, mockedLokiHanlder), - logstream.WithLogTarget(logstream.Loki), - logstream.WithLogTarget(logstream.File), - ) - require.NoError(t, err, "failed to create logstream") - err = d.ConnectLogs(lw) - require.NoError(t, err, "failed to connect logs") - - time.Sleep(2 * time.Second) - err = lw.FlushLogsToTargets() - require.NoError(t, err, "failed to flush logs to targets") - - assertMockedHandlerHasLogs(t, mockedFileHandler) - assertMockedHandlerHasLogs(t, mockedLokiHanlder) - - err = lw.Shutdown(ctx) - require.NoError(t, err, "failed to shutdown logstream") -} - -func TestOneMockedLoggingTarget(t *testing.T) { - ctx := context.Background() - testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} - d, err := NewDeployment(ctx, testData) - // nolint - defer d.Shutdown(ctx) - require.NoError(t, err) - mockedLokiHanlder := &MockedLogHandler{Target: logstream.Loki} - lw, err := logstream.NewLogStream( - t, - nil, - logstream.WithCustomLogHandler(logstream.Loki, mockedLokiHanlder), - logstream.WithLogTarget(logstream.Loki), - ) - require.NoError(t, err, "failed to create logstream") - err = d.ConnectLogs(lw) - require.NoError(t, err, "failed to connect logs") - - time.Sleep(2 * time.Second) - err = lw.FlushLogsToTargets() - require.NoError(t, err, "failed to flush 
logs to targets") - - assertMockedHandlerHasLogs(t, mockedLokiHanlder) - - err = lw.Shutdown(ctx) - require.NoError(t, err, "failed to shutdown logstream") -} - -func assertMockedHandlerHasLogs(t *testing.T, handler *MockedLogHandler) { - matches := make(map[string]int) - matches["A"] = 0 - matches["B"] = 0 - matches["C"] = 0 - - for _, log := range handler.logs { - require.Equal(t, log.TestName, t.Name()) - require.Equal(t, log.ContainerName, "container-0") - - if bytes.Equal(log.Content, A) { - matches["A"]++ - } - - if bytes.Equal(log.Content, B) { - matches["B"]++ - } - - if bytes.Equal(log.Content, C) { - matches["C"]++ - } - } - - require.Greater(t, matches["A"], 0, "A should be present at least once in handler for %s", handler.Target) - require.Greater(t, matches["B"], 0, "B should be matched at least once in handler for %s", handler.Target) - require.Greater(t, matches["C"], 0, "C should be matched at least once in handler for %s", handler.Target) -} From e426e0fc06913be32df82312c414a3d01e0fd5cb Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 10:40:04 -0300 Subject: [PATCH 32/40] when shuttindg down logstream disconnect container first to avoid accepting logs, when consumer is already stopped --- logstream/logstream.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/logstream/logstream.go b/logstream/logstream.go index 17adb57c6..fb08f6a88 100644 --- a/logstream/logstream.go +++ b/logstream/logstream.go @@ -333,15 +333,16 @@ func (m *LogStream) GetConsumers() map[string]*ContainerLogConsumer { // Shutdown disconnects all containers and stops all consumers func (m *LogStream) Shutdown(context context.Context) error { var err error - for _, c := range m.consumers { - if stopErr := c.Stop(); stopErr != nil { - m.log.Error(). - Err(stopErr). - Str("Name", c.name). - Msg("Failed to stop container") - err = stopErr + + var wrapError = func(newErr error) { + if err == nil { + err = newErr + } else { + err = errors.Wrap(err, newErr.Error()) } + } + for _, c := range m.consumers { discErr := m.DisconnectContainer(c.container) if discErr != nil { m.log.Error(). @@ -349,11 +350,15 @@ func (m *LogStream) Shutdown(context context.Context) error { Str("Name", c.name). Msg("Failed to disconnect container") - if err == nil { - err = discErr - } else { - err = errors.Wrap(err, discErr.Error()) - } + wrapError(discErr) + } + + if stopErr := c.Stop(); stopErr != nil { + m.log.Error(). + Err(stopErr). + Str("Name", c.name). + Msg("Failed to stop container") + wrapError(stopErr) } } From 07fb460871845d1780afcc08b77cd6e73afc9246 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 11:00:58 -0300 Subject: [PATCH 33/40] update readme, add method for flushing andshutting down --- README.md | 4 ++- logstream/logstream.go | 63 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 54 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 5744fb4e3..b8076294a 100644 --- a/README.md +++ b/README.md @@ -118,7 +118,9 @@ LogStream stores all logs in gob temporary file. To actually send/save them, you When using `in-memory` or `file` target no other environment variables are required. 
When using `loki` target, following environment variables are required: * `LOKI_TENTANT_ID` - tenant ID * `LOKI_URL` - Loki URL to which logs will be pushed -* `LOKI_BASIC_AUTH` +* `LOKI_BASIC_AUTH` - only needed when running in CI and using a public endpoint + +Also, remember that a different `LOKI_URL` should be used in CI than elsewhere: in CI it should be a public endpoint, while in a local environment it should be a private one. You can print log location for each target using this function: `(m *LogStream) PrintLogTargetsLocations()`. For `file` target it will print relative folder path, for `loki` it will print URL of a Grafana Dashboard scoped to current execution and container ids. For `in-memory` target it's no-op. diff --git a/logstream/logstream.go index fb08f6a88..728b233e2 100644 --- a/logstream/logstream.go +++ b/logstream/logstream.go @@ -330,27 +330,26 @@ func (m *LogStream) GetConsumers() map[string]*ContainerLogConsumer { return m.consumers } +// wrapError wraps an existing error with a new one +func wrapError(existingErr, newErr error) error { + if existingErr == nil { + return newErr + } + return errors.Wrap(existingErr, newErr.Error()) +} + // Shutdown disconnects all containers and stops all consumers func (m *LogStream) Shutdown(context context.Context) error { var err error - - var wrapError = func(newErr error) { - if err == nil { - err = newErr - } else { - err = errors.Wrap(err, newErr.Error()) - } - } - for _, c := range m.consumers { discErr := m.DisconnectContainer(c.container) if discErr != nil { m.log.Error(). - Err(err). + Err(discErr). Str("Name", c.name). Msg("Failed to disconnect container") - wrapError(discErr) + err = wrapError(err, discErr) } if stopErr := c.Stop(); stopErr != nil { @@ -358,7 +357,7 @@ func (m *LogStream) Shutdown(context context.Context) error { Err(stopErr). Str("Name", c.name). Msg("Failed to stop container") - wrapError(stopErr) + err = wrapError(err, stopErr) } } @@ -369,6 +368,46 @@ func (m *LogStream) Shutdown(context context.Context) error { return err } +// FlushAndShutdown flushes all logs to their targets and shuts down the log stream in the default sequence +func (m *LogStream) FlushAndShutdown() error { + var wrappedErr error + + // first disconnect all containers, so that no new logs are accepted + for _, c := range m.consumers { + if err := m.DisconnectContainer(c.container); err != nil { + m.log.Error(). + Err(err). + Str("Name", c.name). + Msg("Failed to disconnect container") + + wrappedErr = wrapError(wrappedErr, err) + } + } + + for _, c := range m.consumers { + if err := c.Stop(); err != nil { + m.log.Error(). + Err(err). + Str("Name", c.name). + Msg("Failed to stop container") + wrappedErr = wrapError(wrappedErr, err) + } + } + + if err := m.FlushLogsToTargets(); err != nil { + m.log.Error(). + Err(err). 
+ Msg("Failed to flush logs to targets") + wrappedErr = wrapError(wrappedErr, err) + } + + if m.loki != nil { + m.loki.Stop() + } + + return wrappedErr +} + type LogWriter = func(testName string, name string, location interface{}) error // PrintLogTargetsLocations prints all log targets locations to stdout From ddcbbe4ac125eebf8c374f1c85b73b2dbc756e62 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 12:11:00 -0300 Subject: [PATCH 34/40] stop consumer, when disconnecting container --- logstream/logstream.go | 48 +++++++++++++++++++++---------------- logstream/logstream_test.go | 4 ---- 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/logstream/logstream.go b/logstream/logstream.go index 728b233e2..bdf0cc7a5 100644 --- a/logstream/logstream.go +++ b/logstream/logstream.go @@ -351,14 +351,6 @@ func (m *LogStream) Shutdown(context context.Context) error { err = wrapError(err, discErr) } - - if stopErr := c.Stop(); stopErr != nil { - m.log.Error(). - Err(stopErr). - Str("Name", c.name). - Msg("Failed to stop container") - err = wrapError(err, stopErr) - } } if m.loki != nil { @@ -384,16 +376,6 @@ func (m *LogStream) FlushAndShutdown() error { } } - for _, c := range m.consumers { - if err := c.Stop(); err != nil { - m.log.Error(). - Err(err). - Str("Name", c.name). - Msg("Failed to stop container") - wrappedErr = wrapError(wrappedErr, err) - } - } - if err := m.FlushLogsToTargets(); err != nil { m.log.Error(). Err(err). @@ -442,7 +424,7 @@ func (m *LogStream) SaveLogTargetsLocations(writer LogWriter) { } // Stop stops the consumer and closes temp file -func (g *ContainerLogConsumer) Stop() error { +func (g *ContainerLogConsumer) stop() error { if g.isDone { return nil } @@ -460,12 +442,36 @@ func (g *ContainerLogConsumer) Stop() error { // DisconnectContainer disconnects particular container func (m *LogStream) DisconnectContainer(container LogProducingContainer) error { + var err error + if container.IsRunning() { m.log.Info().Str("container", container.GetContainerID()).Msg("Disconnecting container") - return container.StopLogProducer() + err = container.StopLogProducer() } - return nil + consumerFound := false + for _, consumer := range m.consumers { + if consumer.container.GetContainerID() == container.GetContainerID() { + consumerFound = true + if stopErr := consumer.stop(); err != nil { + m.log.Error(). + Err(stopErr). + Str("Name", consumer.name). + Msg("Failed to stop consumer") + err = wrapError(err, stopErr) + } + delete(m.consumers, consumer.name) + break + } + } + + if !consumerFound { + m.log.Warn(). + Str("container ID", container.GetContainerID()). 
+ Msg("No consume found for container") + } + + return err } // ContainerLogs return all logs for particular container diff --git a/logstream/logstream_test.go b/logstream/logstream_test.go index 3878c1d40..a5f9b433c 100644 --- a/logstream/logstream_test.go +++ b/logstream/logstream_test.go @@ -167,8 +167,6 @@ func TestLogStreamDocker(t *testing.T) { // this code terminates the containers properly for _, c := range lw.GetConsumers() { if !tc.exitEarly { - stopErr := c.Stop() - require.NoError(t, stopErr, "should not fail to stop log producer") if err := lw.DisconnectContainer(c.GetContainer()); err != nil { t.Fatalf("failed to disconnect container: %s", err.Error()) } @@ -415,8 +413,6 @@ func TestLogStreamTwoDockerContainers(t *testing.T) { name, err := c.GetContainer().Name(ctx) require.NoError(t, err, "should not fail to get container name") if name == containerOneName { - stopErr := c.Stop() - require.NoError(t, stopErr, "should not fail to stop log producer") err = lw.DisconnectContainer(containerOne) require.NoError(t, err, "logstream should disconnect from container") } From d38f1c1bad90103fdaf61c5baffc13875edea95f Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 12:22:42 -0300 Subject: [PATCH 35/40] allow to overwrite consumer in logwatch, fix stopping sequence --- logstream/logstream.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/logstream/logstream.go b/logstream/logstream.go index bdf0cc7a5..e9ad0ecfe 100644 --- a/logstream/logstream.go +++ b/logstream/logstream.go @@ -206,10 +206,6 @@ func (m *LogStream) ConnectContainer(ctx context.Context, container LogProducing prefix = name } - if _, ok := m.consumers[name]; ok { - return errors.Errorf("container %s is already connected", name) - } - enabledLogTargets := make([]LogTarget, 0) for logTarget := range m.logTargetHandlers { enabledLogTargets = append(enabledLogTargets, logTarget) @@ -433,10 +429,6 @@ func (g *ContainerLogConsumer) stop() error { g.logListeningDone <- struct{}{} defer close(g.logListeningDone) - if g.tempFile != nil { - return g.tempFile.Close() - } - return nil } @@ -460,7 +452,6 @@ func (m *LogStream) DisconnectContainer(container LogProducingContainer) error { Msg("Failed to stop consumer") err = wrapError(err, stopErr) } - delete(m.consumers, consumer.name) break } } From 2568fb4ee780e1f24bc2bd54c6cc05d07e559248 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 13:07:45 -0300 Subject: [PATCH 36/40] add test name to grafana dashboard url, if test name is set --- logstream/logstream_handlers.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/logstream/logstream_handlers.go b/logstream/logstream_handlers.go index af6e0560d..28ec64e4e 100644 --- a/logstream/logstream_handlers.go +++ b/logstream/logstream_handlers.go @@ -142,6 +142,10 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum return h.grafanaUrl, nil } + if len(consumers) == 0 { + return "", errors.New("no Loki consumers found") + } + grafanaBaseUrl := os.Getenv("GRAFANA_URL") if grafanaBaseUrl == "" { return "", errors.New("GRAFANA_URL env var is not set") @@ -157,10 +161,7 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum sb.WriteString("/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs?orgId=1&") sb.WriteString(fmt.Sprintf("var-run_id=%s", h.runId)) - if len(consumers) == 0 { - return "", errors.New("no Loki consumers found") - } - + var testName string for _, c := range consumers { if 
c.hasLogTarget(Loki) { sb.WriteString(fmt.Sprintf("&var-container_id=%s", c.name)) @@ -169,9 +170,16 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum if c.GetStartTime().Before(rangeFrom) { rangeFrom = c.GetStartTime() } + + if testName == "" && c.lw.testName != NO_TEST { + testName = c.lw.testName + } } sb.WriteString(fmt.Sprintf("&from=%d&to=%d", rangeFrom.UnixMilli(), rangeTo.UnixMilli())) + if testName != "" { + sb.WriteString(fmt.Sprintf("&var-test=%s", testName)) + } h.grafanaUrl = sb.String() return h.grafanaUrl, nil From 6f9e83ee5ecf87c574092a71afdd5641a55eab6b Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 16:22:52 -0300 Subject: [PATCH 37/40] open log file in handler.Init() to avoid opening file each time a new log is saved --- logstream/logstream.go | 66 +++++++++++++++++++------ logstream/logstream_handler_test.go | 8 +++ logstream/logstream_handlers.go | 77 ++++++++++++++++++++++++----- logstream/logstream_test.go | 6 +-- 4 files changed, 126 insertions(+), 31 deletions(-) diff --git a/logstream/logstream.go index e9ad0ecfe..61f51ecc6 100644 --- a/logstream/logstream.go +++ b/logstream/logstream.go @@ -419,7 +419,7 @@ func (m *LogStream) SaveLogTargetsLocations(writer LogWriter) { } } -// Stop stops the consumer and closes temp file +// stop stops the consumer and closes the listening channel (it won't be accepting any logs from now on) func (g *ContainerLogConsumer) stop() error { if g.isDone { return nil @@ -475,7 +475,7 @@ func (m *LogStream) ContainerLogs(name string) ([]string, error) { return nil } - err := m.GetAllLogsAndConsume(NoOpConsumerFn, getLogsFn) + err := m.GetAllLogsAndConsume(NoOpConsumerFn, getLogsFn, NoOpConsumerFn) if err != nil { return []string{}, err } @@ -492,7 +492,7 @@ func NoOpConsumerFn(consumer *ContainerLogConsumer) error { } // GetAllLogsAndConsume gets all logs for all consumers (containers) and consumes them using consumeLogFn -func (m *LogStream) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consumeLogFn ConsumerLogConsumingFn) (loopErr error) { +func (m *LogStream) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consumeLogFn ConsumerLogConsumingFn, postExecuteFn ConsumerConsumingFn) (loopErr error) { m.acceptMutex.Lock() defer m.acceptMutex.Unlock() @@ -578,6 +578,16 @@ func (m *LogStream) GetAllLogsAndConsume(preExecuteFn ConsumerConsumingFn, consu return } } + + postExecuteErr := postExecuteFn(consumer) + if postExecuteErr != nil { + m.log.Error(). + Err(postExecuteErr). + Str("Container", consumer.name). + Msg("Failed to run post-execute function") + attachError(postExecuteErr) + continue + } } return @@ -589,11 +599,22 @@ func (m *LogStream) FlushLogsToTargets() error { // do not accept any new logs consumer.isDone = true + for _, handler := range m.logTargetHandlers { + consumer.ls.log.Debug(). + Str("container name", consumer.name). + Str("Handler", string(handler.GetTarget())). + Msg("Initializing log target handler") + + if err := handler.Init(consumer); err != nil { + return err + } + } + return nil } var flushLogsFn = func(consumer *ContainerLogConsumer, log LogContent) error { for _, logTarget := range consumer.logTargets { - if handler, ok := consumer.lw.logTargetHandlers[logTarget]; ok { + if handler, ok := consumer.ls.logTargetHandlers[logTarget]; ok { if err := handler.Handle(consumer, log); err != nil { m.log.Error(). Err(err). 
@@ -615,7 +636,22 @@ func (m *LogStream) FlushLogsToTargets() error { return nil } - flushErr := m.GetAllLogsAndConsume(preExecuteFn, flushLogsFn) + var postExecuteFn = func(consumer *ContainerLogConsumer) error { + for _, handler := range m.logTargetHandlers { + consumer.ls.log.Debug(). + Str("container name", consumer.name). + Str("Handler", string(handler.GetTarget())). + Msg("Tearing down log target handler") + + if err := handler.Teardown(); err != nil { + return err + } + } + + return nil + } + + flushErr := m.GetAllLogsAndConsume(preExecuteFn, flushLogsFn, postExecuteFn) if flushErr == nil { m.log.Info(). Msg("Finished flushing logs") @@ -633,7 +669,7 @@ type ContainerLogConsumer struct { name string prefix string logTargets []LogTarget - lw *LogStream + ls *LogStream tempFile *os.File encoder *gob.Encoder isDone bool @@ -656,7 +692,7 @@ func newContainerLogConsumer(ctx context.Context, lw *LogStream, container LogPr name: containerName, prefix: prefix, logTargets: logTargets, - lw: lw, + ls: lw, isDone: false, hasErrored: false, logListeningDone: make(chan struct{}, 1), @@ -717,16 +753,16 @@ func (g *ContainerLogConsumer) GetContainer() LogProducingContainer { // Accept accepts the log message from particular container and saves it to the temp gob file func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { - g.lw.acceptMutex.Lock() - defer g.lw.acceptMutex.Unlock() + g.ls.acceptMutex.Lock() + defer g.ls.acceptMutex.Unlock() if g.hasErrored { return } if g.isDone { - g.lw.log.Error(). - Str("Test", g.lw.testName). + g.ls.log.Error(). + Str("Test", g.ls.testName). Str("Container", g.name). Str("Log", string(l.Content)). Msg("Consumer has finished, but you are still trying to accept logs. This should never happen") @@ -739,7 +775,7 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { } if g.tempFile == nil || g.encoder == nil { - g.lw.log.Error(). + g.ls.log.Error(). Str("Container", g.name). Msg("temp file or encoder is nil, consumer cannot work, this should never happen") g.MarkAsErrored() @@ -761,21 +797,21 @@ func (g *ContainerLogConsumer) Accept(l testcontainers.Log) { } content := LogContent{ - TestName: g.lw.testName, + TestName: g.ls.testName, ContainerName: g.name, Content: l.Content, Time: time.Now(), } if err := g.streamLogToTempFile(content); err != nil { - g.lw.log.Error(). + g.ls.log.Error(). Err(err). Str("Container", g.name). Msg("Failed to stream log to temp file") g.hasErrored = true err = g.tempFile.Close() if err != nil { - g.lw.log.Error(). + g.ls.log.Error(). Err(err). 
Msg("Failed to close temp file") } diff --git a/logstream/logstream_handler_test.go b/logstream/logstream_handler_test.go index a2633ba99..96a922260 100644 --- a/logstream/logstream_handler_test.go +++ b/logstream/logstream_handler_test.go @@ -38,6 +38,14 @@ func (m *MockedLogHandler) GetRunId() string { return m.executionId } +func (m *MockedLogHandler) Init(_ *logstream.ContainerLogConsumer) error { + return nil +} + +func (m *MockedLogHandler) Teardown() error { + return nil +} + func TestMultipleMockedLoggingTargets(t *testing.T) { ctx := context.Background() testData := testData{repeat: 10, perSecond: 0.01, streams: []string{"A\nB\nC\nD"}} diff --git a/logstream/logstream_handlers.go b/logstream/logstream_handlers.go index 28ec64e4e..386380689 100644 --- a/logstream/logstream_handlers.go +++ b/logstream/logstream_handlers.go @@ -26,6 +26,8 @@ type HandleLogTarget interface { GetTarget() LogTarget SetRunId(string) GetRunId() string + Init(*ContainerLogConsumer) error + Teardown() error } func getDefaultLogHandlers() map[LogTarget]HandleLogTarget { @@ -42,6 +44,7 @@ type FileLogHandler struct { logFolder string shouldSkipLogging bool runId string + logFile *os.File } func (h *FileLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { @@ -103,6 +106,33 @@ func (h *FileLogHandler) GetRunId() string { return h.runId } +func (h *FileLogHandler) Init(c *ContainerLogConsumer) error { + folder, err := h.getOrCreateLogFolder(c.ls.testName) + if err != nil { + h.shouldSkipLogging = true + + return errors.Wrap(err, "failed to create logs folder. File logging stopped") + } + + logFileName := filepath.Join(folder, fmt.Sprintf("%s.log", c.name)) + h.logFile, err = os.OpenFile(logFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + h.shouldSkipLogging = true + + return errors.Wrap(err, "failed to open log file. File logging stopped") + } + + return nil +} + +func (h *FileLogHandler) Teardown() error { + if h.logFile != nil { + return h.logFile.Close() + } + + return nil +} + // LokiLogHandler sends logs to Loki type LokiLogHandler struct { grafanaUrl string @@ -112,22 +142,15 @@ type LokiLogHandler struct { func (h *LokiLogHandler) Handle(c *ContainerLogConsumer, content LogContent) error { if h.shouldSkipLogging { - c.lw.log.Warn().Str("Test", content.TestName).Msg("Skipping pushing logs to Loki for this test") + c.ls.log.Warn().Str("Test", content.TestName).Msg("Skipping pushing logs to Loki for this test") return nil } - if c.lw.loki == nil { - loki, err := wasp.NewLokiClient(wasp.NewEnvLokiConfig()) - if err != nil { - c.lw.log.Error().Err(err).Msg("Failed to create Loki client") - h.shouldSkipLogging = true - - return err - } - c.lw.loki = loki + if c.ls.loki == nil { + return errors.New("Loki client is not initialized. 
Have you called Init()?") } - err := c.lw.loki.Handle(model.LabelSet{ + err := c.ls.loki.Handle(model.LabelSet{ "type": "log_stream", "test": model.LabelValue(content.TestName), "container_id": model.LabelValue(content.ContainerName), @@ -171,8 +194,8 @@ func (h *LokiLogHandler) GetLogLocation(consumers map[string]*ContainerLogConsum rangeFrom = c.GetStartTime() } - if testName == "" && c.lw.testName != NO_TEST { - testName = c.lw.testName + if testName == "" && c.ls.testName != NO_TEST { + testName = c.ls.testName } } @@ -197,6 +220,26 @@ func (h *LokiLogHandler) GetRunId() string { return h.runId } +func (h *LokiLogHandler) Init(c *ContainerLogConsumer) error { + if c.ls.loki == nil { + waspConfig := wasp.NewEnvLokiConfig() + loki, err := wasp.NewLokiClient(waspConfig) + if err != nil { + c.ls.log.Error().Err(err).Msg("Failed to create Loki client") + h.shouldSkipLogging = true + + return err + } + c.ls.loki = loki + } + + return nil +} + +func (h *LokiLogHandler) Teardown() error { + return nil +} + // InMemoryLogHandler stores logs in memory type InMemoryLogHandler struct { logs map[string][]LogContent @@ -232,3 +275,11 @@ func (h *InMemoryLogHandler) SetRunId(runId string) { func (h *InMemoryLogHandler) GetRunId() string { return h.runId } + +func (h *InMemoryLogHandler) Init(_ *ContainerLogConsumer) error { + return nil +} + +func (h *InMemoryLogHandler) Teardown() error { + return nil +} diff --git a/logstream/logstream_test.go b/logstream/logstream_test.go index a5f9b433c..73728c06b 100644 --- a/logstream/logstream_test.go +++ b/logstream/logstream_test.go @@ -252,7 +252,7 @@ func TestLogStream_GetAllLogs_ErrorsAfterFiveLogs(t *testing.T) { return errors.New(expectedErrorText) } - err = lw.GetAllLogsAndConsume(logstream.NoOpConsumerFn, testFn) + err = lw.GetAllLogsAndConsume(logstream.NoOpConsumerFn, testFn, logstream.NoOpConsumerFn) require.Error(t, err, "should fail to get all logs") require.Equal(t, err.Error(), expectedErrorText, "should fail with test error") require.Equal(t, 5, len(logsProcessed), "should process 5 logs") @@ -312,7 +312,7 @@ func TestLogStream_GetAllLogs_TwoConsumers_FirstErrorsAfterFiveLogs(t *testing.T return errors.New(expectedErrorText) } - err = lw.GetAllLogsAndConsume(logstream.NoOpConsumerFn, testFn) + err = lw.GetAllLogsAndConsume(logstream.NoOpConsumerFn, testFn, logstream.NoOpConsumerFn) require.Error(t, err, "should fail to get all logs") require.Equal(t, expectedErrorText, err.Error(), "should fail with test error") require.Equal(t, 5, len(logsProcessed[containerName_1]), "should process 5 logs for first container") @@ -368,7 +368,7 @@ func TestLogStream_GetAllLogs_ErrorsBeforeConsumption(t *testing.T) { return errors.New(expectedErrorText) } - err = lw.GetAllLogsAndConsume(errorConsumerFn, testFn) + err = lw.GetAllLogsAndConsume(errorConsumerFn, testFn, logstream.NoOpConsumerFn) require.Error(t, err, "should fail to get all logs") require.Equal(t, err.Error(), expectedErrorText, "should fail with test error") require.Equal(t, 0, len(logsProcessed), "should process zero logs") From 6a0f4141cc71cef5a2eb2d02fc9ac414c249d203 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 6 Dec 2023 16:37:02 -0300 Subject: [PATCH 38/40] add method to get absolute folder path --- utils/osutil/osutil.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/utils/osutil/osutil.go b/utils/osutil/osutil.go index 5c5e5d23f..73508a2b2 100644 --- a/utils/osutil/osutil.go +++ b/utils/osutil/osutil.go @@ -7,6 +7,7 @@ import ( "io" "os" "os/exec" + 
"path/filepath" "strings" "github.com/rs/zerolog" @@ -86,3 +87,12 @@ func ExecCmdWithOptions(ctx context.Context, l zerolog.Logger, command string, o go readStdPipe(stdout, outputFunction) return cmd.Wait() } + +func GetAbsoluteFolderPath(folder string) (string, error) { + wd, err := os.Getwd() + if err != nil { + return "", err + } + + return filepath.Join(wd, folder), nil +} From 4a206b695aa394a86d94e74984686f5daa074763 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 7 Dec 2023 09:39:20 -0300 Subject: [PATCH 39/40] use latest loki, that exists on log pushing --- go.mod | 5 ++++- go.sum | 6 ++++++ logstream/logstream.go | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e10928d1c..7373c470c 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/prometheus/common v0.44.0 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/wasp v0.3.6 + github.com/smartcontractkit/wasp v0.3.7 github.com/stretchr/testify v1.8.4 github.com/testcontainers/testcontainers-go v0.23.0 go.uber.org/atomic v1.11.0 @@ -50,6 +50,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect ) +// avoids ambigious imports of indirect dependencies +exclude github.com/hashicorp/consul v1.2.1 + // K8s versions are infuriatingly inconsistent, so we pin them here. replace ( // replicating the replace directive on cosmos SDK diff --git a/go.sum b/go.sum index c7b44e4f8..a4965fc89 100644 --- a/go.sum +++ b/go.sum @@ -1393,6 +1393,12 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/wasp v0.3.6 h1:1TLWfrTzqZwNvyyoKzPZ8FLQat2lNz640eM+mMh2YxM= github.com/smartcontractkit/wasp v0.3.6/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= +github.com/smartcontractkit/wasp v0.3.7-0.20231206192146-5074f5d22131 h1:sWigo1JJIcScEYu63nB6gh3QzHNc9RdZ1mMdQ2gm1Ps= +github.com/smartcontractkit/wasp v0.3.7-0.20231206192146-5074f5d22131/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= +github.com/smartcontractkit/wasp v0.3.7-0.20231206215104-c476be0723c5 h1:fiBq70lmIwH/Bc2pf7CX4bFD5H4CjKJTgsXzycX4JR8= +github.com/smartcontractkit/wasp v0.3.7-0.20231206215104-c476be0723c5/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= +github.com/smartcontractkit/wasp v0.3.7 h1:3toT+iMSHJ1EKQXE+jGnxfmtLlT0gwEl1A7xGyw0NZY= +github.com/smartcontractkit/wasp v0.3.7/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= diff --git a/logstream/logstream.go b/logstream/logstream.go index 61f51ecc6..a916d691d 100644 --- a/logstream/logstream.go +++ b/logstream/logstream.go @@ -350,7 +350,7 @@ func (m *LogStream) Shutdown(context context.Context) error { } if m.loki != nil { - m.loki.Stop() + m.loki.StopNow() } return err @@ -380,7 +380,7 @@ func (m *LogStream) FlushAndShutdown() error { } if m.loki != nil { - m.loki.Stop() + m.loki.StopNow() } return wrappedErr From f455ffb5754c9ad0736d0d8cc4a2b760a79d3d29 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Thu, 7 Dec 2023 14:36:43 -0300 Subject: [PATCH 40/40] fix go.sum --- go.sum | 6 ------ 1 file changed, 6 
deletions(-) diff --git a/go.sum b/go.sum index a4965fc89..a1a6e3275 100644 --- a/go.sum +++ b/go.sum @@ -1391,12 +1391,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ= github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= -github.com/smartcontractkit/wasp v0.3.6 h1:1TLWfrTzqZwNvyyoKzPZ8FLQat2lNz640eM+mMh2YxM= -github.com/smartcontractkit/wasp v0.3.6/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= -github.com/smartcontractkit/wasp v0.3.7-0.20231206192146-5074f5d22131 h1:sWigo1JJIcScEYu63nB6gh3QzHNc9RdZ1mMdQ2gm1Ps= -github.com/smartcontractkit/wasp v0.3.7-0.20231206192146-5074f5d22131/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= -github.com/smartcontractkit/wasp v0.3.7-0.20231206215104-c476be0723c5 h1:fiBq70lmIwH/Bc2pf7CX4bFD5H4CjKJTgsXzycX4JR8= -github.com/smartcontractkit/wasp v0.3.7-0.20231206215104-c476be0723c5/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= github.com/smartcontractkit/wasp v0.3.7 h1:3toT+iMSHJ1EKQXE+jGnxfmtLlT0gwEl1A7xGyw0NZY= github.com/smartcontractkit/wasp v0.3.7/go.mod h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
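A closing note on the retry behaviour tuned throughout this series: the `fibonacci` helper added to logstream.go implies a Fibonacci backoff between log producer restart attempts, bounded by `WithLogProducerRetryLimit`. The sketch below illustrates that pattern only; the `retryWithFibonacciBackoff` wrapper and the `connect` callback are illustrative stand-ins, not code from this PR:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// fibonacci mirrors the helper patched into logstream.go: it returns the
// n-th Fibonacci number, used here as the number of seconds to back off.
func fibonacci(n int) int {
	if n <= 1 {
		return n
	}
	return fibonacci(n-1) + fibonacci(n-2)
}

// retryWithFibonacciBackoff retries connect up to retryLimit times, waiting
// fib(1)s, fib(2)s, fib(3)s, ... (i.e. 1s, 1s, 2s, 3s, 5s, ...) between attempts.
func retryWithFibonacciBackoff(retryLimit int, connect func() error) error {
	var err error
	for attempt := 1; attempt <= retryLimit; attempt++ {
		if err = connect(); err == nil {
			return nil
		}
		backoff := time.Duration(fibonacci(attempt)) * time.Second
		fmt.Printf("attempt %d failed (%v), retrying in %s\n", attempt, err, backoff)
		time.Sleep(backoff)
	}
	return err
}

func main() {
	failures := 2
	err := retryWithFibonacciBackoff(10, func() error {
		// Simulate a log producer that only comes up on the third attempt.
		if failures > 0 {
			failures--
			return errors.New("log producer not running yet")
		}
		return nil
	})
	fmt.Println("final result:", err)
}
```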