diff --git a/cmd/soroban-rpc/internal/config/flags.go b/cmd/soroban-rpc/internal/config/flags.go index 0c313460..d0af16d7 100644 --- a/cmd/soroban-rpc/internal/config/flags.go +++ b/cmd/soroban-rpc/internal/config/flags.go @@ -10,7 +10,7 @@ import ( "github.com/spf13/pflag" ) -// Init adds the CLI flags to the command. This lets the command output the +// AddFlags Init adds the CLI flags to the command. This lets the command output the // flags as part of the --help output. func (cfg *Config) AddFlags(cmd *cobra.Command) error { cfg.flagset = cmd.PersistentFlags() diff --git a/cmd/soroban-rpc/internal/config/main_test.go b/cmd/soroban-rpc/internal/config/main_test.go index c510c1bd..a4af86ad 100644 --- a/cmd/soroban-rpc/internal/config/main_test.go +++ b/cmd/soroban-rpc/internal/config/main_test.go @@ -34,9 +34,11 @@ func TestLoadConfigPathPrecedence(t *testing.T) { })) require.NoError(t, cfg.Validate()) - assert.Equal(t, "/opt/stellar/soroban-rpc/etc/stellar-captive-core.cfg", cfg.CaptiveCoreConfigPath, "should read values from the config path file") + assert.Equal(t, "/opt/stellar/soroban-rpc/etc/stellar-captive-core.cfg", cfg.CaptiveCoreConfigPath, + "should read values from the config path file") assert.Equal(t, "CLI test passphrase", cfg.NetworkPassphrase, "cli flags should override --config-path values") - assert.Equal(t, "/usr/overridden/stellar-core", cfg.StellarCoreBinaryPath, "cli flags should override --config-path values and env vars") + assert.Equal(t, "/usr/overridden/stellar-core", cfg.StellarCoreBinaryPath, + "cli flags should override --config-path values and env vars") assert.Equal(t, "/env/overridden/db", cfg.SQLiteDBPath, "env var should override config file") assert.Equal(t, 2*time.Second, cfg.CoreRequestTimeout, "default value should be used, if not set anywhere else") } diff --git a/cmd/soroban-rpc/internal/config/option_test.go b/cmd/soroban-rpc/internal/config/option_test.go index 83e9b5eb..4fc08354 100644 --- a/cmd/soroban-rpc/internal/config/option_test.go +++ b/cmd/soroban-rpc/internal/config/option_test.go @@ -1,7 +1,6 @@ package config import ( - "fmt" "math" "testing" "time" @@ -51,15 +50,15 @@ func TestValidateRequired(t *testing.T) { } // unset - assert.ErrorContains(t, o.Validate(o), "required-option is required") + require.ErrorContains(t, o.Validate(o), "required-option is required") // set with blank value require.NoError(t, o.setValue("")) - assert.ErrorContains(t, o.Validate(o), "required-option is required") + require.ErrorContains(t, o.Validate(o), "required-option is required") // set with valid value require.NoError(t, o.setValue("not-blank")) - assert.NoError(t, o.Validate(o)) + require.NoError(t, o.Validate(o)) } func TestValidatePositiveUint32(t *testing.T) { @@ -71,15 +70,15 @@ func TestValidatePositiveUint32(t *testing.T) { } // unset - assert.ErrorContains(t, o.Validate(o), "positive-option must be positive") + require.ErrorContains(t, o.Validate(o), "positive-option must be positive") // set with 0 value require.NoError(t, o.setValue(uint32(0))) - assert.ErrorContains(t, o.Validate(o), "positive-option must be positive") + require.ErrorContains(t, o.Validate(o), "positive-option must be positive") // set with valid value require.NoError(t, o.setValue(uint32(1))) - assert.NoError(t, o.Validate(o)) + require.NoError(t, o.Validate(o)) } func TestValidatePositiveInt(t *testing.T) { @@ -91,19 +90,19 @@ func TestValidatePositiveInt(t *testing.T) { } // unset - assert.ErrorContains(t, o.Validate(o), "positive-option must be positive") + 
require.ErrorContains(t, o.Validate(o), "positive-option must be positive") // set with 0 value require.NoError(t, o.setValue(0)) - assert.ErrorContains(t, o.Validate(o), "positive-option must be positive") + require.ErrorContains(t, o.Validate(o), "positive-option must be positive") // set with negative value require.NoError(t, o.setValue(-1)) - assert.ErrorContains(t, o.Validate(o), "positive-option must be positive") + require.ErrorContains(t, o.Validate(o), "positive-option must be positive") // set with valid value require.NoError(t, o.setValue(1)) - assert.NoError(t, o.Validate(o)) + require.NoError(t, o.Validate(o)) } func TestUnassignableField(t *testing.T) { @@ -126,143 +125,105 @@ func TestNoParserForFlag(t *testing.T) { require.Contains(t, err.Error(), "no parser for flag mykey") } -func TestSetValue(t *testing.T) { +func TestSetValueBool(t *testing.T) { var b bool + testCases := []struct { + name string + value interface{} + err string + }{ + {"valid-bool", true, ""}, + {"valid-bool-string", "true", ""}, + {"valid-bool-string-false", "false", ""}, + {"valid-bool-string-uppercase", "TRUE", ""}, + {"invalid-bool-string", "foobar", "invalid boolean value invalid-bool-string: foobar"}, + } + runTestCases(t, &b, testCases) +} + +func TestSetValueInt(t *testing.T) { var i int + testCases := []struct { + name string + value interface{} + err string + }{ + {"valid-int", 1, ""}, + {"valid-int-string", "1", ""}, + {"invalid-int-string", "abcd", "strconv.ParseInt: parsing \"abcd\": invalid syntax"}, + } + runTestCases(t, &i, testCases) +} + +func TestSetValueUint32(t *testing.T) { var u32 uint32 + testCases := []struct { + name string + value interface{} + err string + }{ + {"valid-uint32", 1, ""}, + {"overflow-uint32", uint64(math.MaxUint32) + 1, "overflow-uint32 overflows uint32"}, + {"negative-uint32", -1, "negative-uint32 cannot be negative"}, + } + runTestCases(t, &u32, testCases) +} + +func TestSetValueUint64(t *testing.T) { var u64 uint64 + testCases := []struct { + name string + value interface{} + err string + }{ + {"valid-uint", 1, ""}, + {"negative-uint", -1, "negative-uint cannot be negative"}, + } + runTestCases(t, &u64, testCases) +} + +func TestSetValueFloat64(t *testing.T) { var f64 float64 - var s string + testCases := []struct { + name string + value interface{} + err string + }{ + {"valid-float", 1.05, ""}, + {"valid-float-int", int64(1234), ""}, + {"valid-float-string", "1.05", ""}, + {"invalid-float-string", "foobar", "strconv.ParseFloat: parsing \"foobar\": invalid syntax"}, + } + runTestCases(t, &f64, testCases) +} - for _, scenario := range []struct { +func TestSetValueString(t *testing.T) { + var s string + testCases := []struct { name string - key interface{} value interface{} - err error + err string }{ - { - name: "valid-bool", - key: &b, - value: true, - err: nil, - }, - { - name: "valid-bool-string", - key: &b, - value: "true", - err: nil, - }, - { - name: "valid-bool-string-false", - key: &b, - value: "false", - err: nil, - }, - { - name: "valid-bool-string-uppercase", - key: &b, - value: "TRUE", - err: nil, - }, - { - name: "invalid-bool-string", - key: &b, - value: "foobar", - err: fmt.Errorf("invalid boolean value invalid-bool-string: foobar"), - }, - { - name: "invalid-bool-string", - key: &b, - value: "foobar", - err: fmt.Errorf("invalid boolean value invalid-bool-string: foobar"), - }, - { - name: "valid-int", - key: &i, - value: 1, - err: nil, - }, - { - name: "valid-int-string", - key: &i, - value: "1", - err: nil, - }, - { - name: 
"invalid-int-string", - key: &i, - value: "abcd", - err: fmt.Errorf("strconv.ParseInt: parsing \"abcd\": invalid syntax"), - }, - { - name: "valid-uint32", - key: &u32, - value: 1, - err: nil, - }, - { - name: "overflow-uint32", - key: &u32, - value: uint64(math.MaxUint32) + 1, - err: fmt.Errorf("overflow-uint32 overflows uint32"), - }, - { - name: "negative-uint32", - key: &u32, - value: -1, - err: fmt.Errorf("negative-uint32 cannot be negative"), - }, - { - name: "valid-uint", - key: &u64, - value: 1, - err: nil, - }, - { - name: "negative-uint", - key: &u64, - value: -1, - err: fmt.Errorf("negative-uint cannot be negative"), - }, - { - name: "valid-float", - key: &f64, - value: 1.05, - err: nil, - }, - { - name: "valid-float-int", - key: &f64, - value: int64(1234), - err: nil, - }, - { - name: "valid-float-string", - key: &f64, - value: "1.05", - err: nil, - }, - { - name: "invalid-float-string", - key: &f64, - value: "foobar", - err: fmt.Errorf("strconv.ParseFloat: parsing \"foobar\": invalid syntax"), - }, - { - name: "valid-string", - key: &s, - value: "foobar", - err: nil, - }, - } { - t.Run(scenario.name, func(t *testing.T) { + {"valid-string", "foobar", ""}, + } + runTestCases(t, &s, testCases) +} + +func runTestCases(t *testing.T, key interface{}, testCases []struct { + name string + value interface{} + err string +}, +) { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { co := Option{ - Name: scenario.name, - ConfigKey: scenario.key, + Name: tc.name, + ConfigKey: key, } - err := co.setValue(scenario.value) - if scenario.err != nil { - require.EqualError(t, err, scenario.err.Error()) + err := co.setValue(tc.value) + if tc.err != "" { + require.EqualError(t, err, tc.err) } else { require.NoError(t, err) } diff --git a/cmd/soroban-rpc/internal/config/options.go b/cmd/soroban-rpc/internal/config/options.go index 58df85e2..b04793a5 100644 --- a/cmd/soroban-rpc/internal/config/options.go +++ b/cmd/soroban-rpc/internal/config/options.go @@ -507,15 +507,12 @@ func (e missingRequiredOptionError) Error() string { } func required(option *Option) error { - switch reflect.ValueOf(option.ConfigKey).Elem().Kind() { - case reflect.Slice: - if reflect.ValueOf(option.ConfigKey).Elem().Len() > 0 { - return nil - } - default: - if !reflect.ValueOf(option.ConfigKey).Elem().IsZero() { - return nil - } + value := reflect.ValueOf(option.ConfigKey).Elem() + + isSet := value.Kind() == reflect.Slice && value.Len() > 0 || value.Kind() != reflect.Slice && !value.IsZero() + + if isSet { + return nil } var waysToSet []string diff --git a/cmd/soroban-rpc/internal/config/parse.go b/cmd/soroban-rpc/internal/config/parse.go index 58a5d80b..f07c1b5e 100644 --- a/cmd/soroban-rpc/internal/config/parse.go +++ b/cmd/soroban-rpc/internal/config/parse.go @@ -94,7 +94,11 @@ func parseString(option *Option, i interface{}) error { case nil: return nil case string: - *option.ConfigKey.(*string) = v + if strPtr, ok := option.ConfigKey.(*string); ok { + *strPtr = v + } else { + return fmt.Errorf("invalid type for %s: expected *string", option.Name) + } default: return fmt.Errorf("could not parse string %s: %v", option.Name, i) } @@ -128,6 +132,11 @@ func parseUint32(option *Option, i interface{}) error { } func parseDuration(option *Option, i interface{}) error { + durationPtr, ok := option.ConfigKey.(*time.Duration) + if !ok { + return fmt.Errorf("invalid type for %s: expected *time.Duration", option.Name) + } + switch v := i.(type) { case nil: return nil @@ -136,11 +145,11 @@ func parseDuration(option 
*Option, i interface{}) error { if err != nil { return fmt.Errorf("could not parse duration: %q: %w", v, err) } - *option.ConfigKey.(*time.Duration) = d + *durationPtr = d case time.Duration: - *option.ConfigKey.(*time.Duration) = v + *durationPtr = v case *time.Duration: - *option.ConfigKey.(*time.Duration) = *v + *durationPtr = *v default: return fmt.Errorf("%s is not a duration", option.Name) } @@ -148,31 +157,34 @@ func parseDuration(option *Option, i interface{}) error { } func parseStringSlice(option *Option, i interface{}) error { + stringSlicePtr, ok := option.ConfigKey.(*[]string) + if !ok { + return fmt.Errorf("invalid type for %s: expected *[]string", option.Name) + } + switch v := i.(type) { case nil: return nil case string: if v == "" { - *option.ConfigKey.(*[]string) = nil + *stringSlicePtr = nil } else { - *option.ConfigKey.(*[]string) = strings.Split(v, ",") + *stringSlicePtr = strings.Split(v, ",") } - return nil case []string: - *option.ConfigKey.(*[]string) = v - return nil + *stringSlicePtr = v case []interface{}: - *option.ConfigKey.(*[]string) = make([]string, len(v)) + result := make([]string, len(v)) for i, s := range v { - switch s := s.(type) { - case string: - (*option.ConfigKey.(*[]string))[i] = s - default: - return fmt.Errorf("could not parse %s: %v", option.Name, v) + str, ok := s.(string) + if !ok { + return fmt.Errorf("could not parse %s: element %d is not a string", option.Name, i) } + result[i] = str } - return nil + *stringSlicePtr = result default: return fmt.Errorf("could not parse %s: %v", option.Name, v) } + return nil } diff --git a/cmd/soroban-rpc/internal/config/toml.go b/cmd/soroban-rpc/internal/config/toml.go index e6ea5a91..038a6f74 100644 --- a/cmd/soroban-rpc/internal/config/toml.go +++ b/cmd/soroban-rpc/internal/config/toml.go @@ -9,6 +9,11 @@ import ( "github.com/pelletier/go-toml" ) +const ( + maxLineWidth = 80 + commentIndent = 2 +) + func parseToml(r io.Reader, strict bool, cfg *Config) error { tree, err := toml.LoadReader(r) if err != nil { @@ -73,7 +78,7 @@ func (cfg *Config) MarshalTOML() ([]byte, error) { key, toml.SetOptions{ Comment: strings.ReplaceAll( - wordWrap(option.Usage, 80-2), + wordWrap(option.Usage, maxLineWidth-commentIndent), "\n", "\n ", ), diff --git a/cmd/soroban-rpc/internal/config/toml_test.go b/cmd/soroban-rpc/internal/config/toml_test.go index ae762567..b2d2d01d 100644 --- a/cmd/soroban-rpc/internal/config/toml_test.go +++ b/cmd/soroban-rpc/internal/config/toml_test.go @@ -106,25 +106,26 @@ func TestRoundTrip(t *testing.T) { // Generate test values for every option, so we can round-trip test them all. 
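The parse.go hunks above replace repeated inline assertions such as `*option.ConfigKey.(*time.Duration) = v` with a single checked assertion up front, so a mis-typed `ConfigKey` yields an error instead of a panic. A minimal standalone sketch of that shape, using a simplified stand-in for the config package's `Option` type (the names here are illustrative, not the real API):

```go
package main

import (
	"fmt"
	"time"
)

// option is a simplified stand-in for the config package's Option type.
type option struct {
	Name      string
	ConfigKey interface{}
}

// parseDuration asserts the destination pointer once, then switches on the
// incoming value, mirroring the refactored parseDuration/parseStringSlice shape.
func parseDuration(o *option, i interface{}) error {
	durationPtr, ok := o.ConfigKey.(*time.Duration)
	if !ok {
		return fmt.Errorf("invalid type for %s: expected *time.Duration", o.Name)
	}
	switch v := i.(type) {
	case nil:
		return nil
	case string:
		d, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("could not parse duration: %q: %w", v, err)
		}
		*durationPtr = d
	case time.Duration:
		*durationPtr = v
	default:
		return fmt.Errorf("%s is not a duration", o.Name)
	}
	return nil
}

func main() {
	var d time.Duration
	o := &option{Name: "timeout", ConfigKey: &d}
	if err := parseDuration(o, "2s"); err != nil {
		panic(err)
	}
	fmt.Println(d) // 2s
}
```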
for _, option := range cfg.options() { optType := reflect.ValueOf(option.ConfigKey).Elem().Type() - switch option.ConfigKey.(type) { + switch v := option.ConfigKey.(type) { case *bool: - *option.ConfigKey.(*bool) = true + *v = true case *string: - *option.ConfigKey.(*string) = "test" + *v = "test" case *uint: - *option.ConfigKey.(*uint) = 42 + *v = 42 case *uint32: - *option.ConfigKey.(*uint32) = 32 + *v = 32 case *time.Duration: - *option.ConfigKey.(*time.Duration) = 5 * time.Second + *v = 5 * time.Second case *[]string: - *option.ConfigKey.(*[]string) = []string{"a", "b"} + *v = []string{"a", "b"} case *logrus.Level: - *option.ConfigKey.(*logrus.Level) = logrus.InfoLevel + *v = logrus.InfoLevel case *LogFormat: - *option.ConfigKey.(*LogFormat) = LogFormatText + *v = LogFormatText default: - t.Fatalf("TestRoundTrip not implemented for type %s, on option %s, please add a test value", optType.Kind(), option.Name) + t.Fatalf("TestRoundTrip not implemented for type %s, on option %s, "+ + "please add a test value", optType.Kind(), option.Name) } } diff --git a/cmd/soroban-rpc/internal/daemon/daemon.go b/cmd/soroban-rpc/internal/daemon/daemon.go index 07ca7992..67110369 100644 --- a/cmd/soroban-rpc/internal/daemon/daemon.go +++ b/cmd/soroban-rpc/internal/daemon/daemon.go @@ -13,6 +13,7 @@ import ( "syscall" "time" + "github.com/go-chi/chi" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -149,21 +150,53 @@ func newCaptiveCore(cfg *config.Config, logger *supportlog.Entry) (*ledgerbacken } func MustNew(cfg *config.Config, logger *supportlog.Entry) *Daemon { + logger = setupLogger(cfg, logger) + core := mustCreateCaptiveCore(cfg, logger) + historyArchive := mustCreateHistoryArchive(cfg, logger) + metricsRegistry := prometheus.NewRegistry() + + daemon := &Daemon{ + logger: logger, + core: core, + db: mustOpenDatabase(cfg, logger, metricsRegistry), + done: make(chan struct{}), + metricsRegistry: metricsRegistry, + coreClient: newCoreClientWithMetrics(createStellarCoreClient(cfg), metricsRegistry), + } + + feewindows := daemon.mustInitializeStorage(cfg) + + daemon.ingestService = createIngestService(cfg, logger, daemon, feewindows, historyArchive) + daemon.preflightWorkerPool = createPreflightWorkerPool(cfg, logger, daemon) + daemon.jsonRPCHandler = createJSONRPCHandler(cfg, logger, daemon, feewindows) + + daemon.setupHTTPServers(cfg) + daemon.registerMetrics() + + return daemon +} + +func setupLogger(cfg *config.Config, logger *supportlog.Entry) *supportlog.Entry { logger.SetLevel(cfg.LogLevel) if cfg.LogFormat == config.LogFormatJSON { logger.UseJSONFormatter() } - logger.WithFields(supportlog.F{ "version": config.Version, "commit": config.CommitHash, }).Info("starting Soroban RPC") + return logger +} +func mustCreateCaptiveCore(cfg *config.Config, logger *supportlog.Entry) *ledgerbackend.CaptiveStellarCore { core, err := newCaptiveCore(cfg, logger) if err != nil { logger.WithError(err).Fatal("could not create captive core") } + return core +} +func mustCreateHistoryArchive(cfg *config.Config, logger *supportlog.Entry) *historyarchive.ArchiveInterface { if len(cfg.HistoryArchiveURLs) == 0 { logger.Fatal("no history archives URLs were provided") } @@ -183,52 +216,54 @@ func MustNew(cfg *config.Config, logger *supportlog.Entry) *Daemon { if err != nil { logger.WithError(err).Fatal("could not connect to history archive") } + return &historyArchive +} - metricsRegistry := prometheus.NewRegistry() +func mustOpenDatabase(cfg *config.Config, 
logger *supportlog.Entry, metricsRegistry *prometheus.Registry) *db.DB { dbConn, err := db.OpenSQLiteDBWithPrometheusMetrics(cfg.SQLiteDBPath, prometheusNamespace, "db", metricsRegistry) if err != nil { logger.WithError(err).Fatal("could not open database") } + return dbConn +} - daemon := &Daemon{ - logger: logger, - core: core, - db: dbConn, - done: make(chan struct{}), - metricsRegistry: metricsRegistry, - coreClient: newCoreClientWithMetrics(stellarcore.Client{ - URL: cfg.StellarCoreURL, - HTTP: &http.Client{Timeout: cfg.CoreRequestTimeout}, - }, metricsRegistry), +func createStellarCoreClient(cfg *config.Config) stellarcore.Client { + return stellarcore.Client{ + URL: cfg.StellarCoreURL, + HTTP: &http.Client{Timeout: cfg.CoreRequestTimeout}, } +} - feewindows := daemon.mustInitializeStorage(cfg) - - onIngestionRetry := func(err error, dur time.Duration) { +func createIngestService(cfg *config.Config, logger *supportlog.Entry, daemon *Daemon, + feewindows *feewindow.FeeWindows, historyArchive *historyarchive.ArchiveInterface, +) *ingest.Service { + onIngestionRetry := func(err error, _ time.Duration) { logger.WithError(err).Error("could not run ingestion. Retrying") } - ingestService := ingest.NewService(ingest.Config{ + return ingest.NewService(ingest.Config{ Logger: logger, DB: db.NewReadWriter( logger, - dbConn, + daemon.db, daemon, maxLedgerEntryWriteBatchSize, cfg.HistoryRetentionWindow, cfg.NetworkPassphrase, ), NetworkPassPhrase: cfg.NetworkPassphrase, - Archive: historyArchive, - LedgerBackend: core, + Archive: *historyArchive, + LedgerBackend: daemon.core, Timeout: cfg.IngestionTimeout, OnIngestionRetry: onIngestionRetry, Daemon: daemon, FeeWindows: feewindows, }) +} - ledgerEntryReader := db.NewLedgerEntryReader(dbConn) - preflightWorkerPool := preflight.NewPreflightWorkerPool( +func createPreflightWorkerPool(cfg *config.Config, logger *supportlog.Entry, daemon *Daemon) *preflight.WorkerPool { + ledgerEntryReader := db.NewLedgerEntryReader(daemon.db) + return preflight.NewPreflightWorkerPool( preflight.WorkerPoolConfig{ Daemon: daemon, WorkerCount: cfg.PreflightWorkerCount, @@ -239,82 +274,74 @@ func MustNew(cfg *config.Config, logger *supportlog.Entry) *Daemon { Logger: logger, }, ) +} - jsonRPCHandler := internal.NewJSONRPCHandler(cfg, internal.HandlerParams{ +func createJSONRPCHandler(cfg *config.Config, logger *supportlog.Entry, daemon *Daemon, + feewindows *feewindow.FeeWindows, +) *internal.Handler { + rpcHandler := internal.NewJSONRPCHandler(cfg, internal.HandlerParams{ Daemon: daemon, FeeStatWindows: feewindows, Logger: logger, - LedgerReader: db.NewLedgerReader(dbConn), - LedgerEntryReader: db.NewLedgerEntryReader(dbConn), - TransactionReader: db.NewTransactionReader(logger, dbConn, cfg.NetworkPassphrase), - EventReader: db.NewEventReader(logger, dbConn, cfg.NetworkPassphrase), - PreflightGetter: preflightWorkerPool, + LedgerReader: db.NewLedgerReader(daemon.db), + LedgerEntryReader: db.NewLedgerEntryReader(daemon.db), + TransactionReader: db.NewTransactionReader(logger, daemon.db, cfg.NetworkPassphrase), + EventReader: db.NewEventReader(logger, daemon.db, cfg.NetworkPassphrase), + PreflightGetter: daemon.preflightWorkerPool, }) + return &rpcHandler +} - httpHandler := supporthttp.NewAPIMux(logger) - httpHandler.Handle("/", jsonRPCHandler) - - daemon.preflightWorkerPool = preflightWorkerPool - daemon.ingestService = ingestService - daemon.jsonRPCHandler = &jsonRPCHandler - - // Use a separate listener in order to obtain the actual TCP port - // when using dynamic 
ports during testing (e.g. endpoint="localhost:0") - daemon.listener, err = net.Listen("tcp", cfg.Endpoint) +func (d *Daemon) setupHTTPServers(cfg *config.Config) { + var err error + d.listener, err = net.Listen("tcp", cfg.Endpoint) if err != nil { - daemon.logger.WithError(err).WithField("endpoint", cfg.Endpoint).Fatal("cannot listen on endpoint") + d.logger.WithError(err).WithField("endpoint", cfg.Endpoint).Fatal("cannot listen on endpoint") } - daemon.server = &http.Server{ - Handler: httpHandler, + d.server = &http.Server{ + Handler: createHTTPHandler(d.logger, d.jsonRPCHandler), ReadTimeout: defaultReadTimeout, } + if cfg.AdminEndpoint != "" { - adminMux := supporthttp.NewMux(logger) - adminMux.HandleFunc("/debug/pprof/", pprof.Index) - adminMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - adminMux.HandleFunc("/debug/pprof/profile", pprof.Profile) - adminMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - adminMux.HandleFunc("/debug/pprof/trace", pprof.Trace) - // add the entry points for: - // goroutine, threadcreate, heap, allocs, block, mutex - for _, profile := range runtimePprof.Profiles() { - adminMux.Handle("/debug/pprof/"+profile.Name(), pprof.Handler(profile.Name())) - } - adminMux.Handle("/metrics", promhttp.HandlerFor(metricsRegistry, promhttp.HandlerOpts{})) - daemon.adminListener, err = net.Listen("tcp", cfg.AdminEndpoint) - if err != nil { - daemon.logger.WithError(err).WithField("endpoint", cfg.Endpoint).Fatal("cannot listen on admin endpoint") - } - daemon.adminServer = &http.Server{Handler: adminMux} + d.setupAdminServer(cfg) } - daemon.registerMetrics() - return daemon +} + +func createHTTPHandler(logger *supportlog.Entry, jsonRPCHandler *internal.Handler) http.Handler { + httpHandler := supporthttp.NewAPIMux(logger) + httpHandler.Handle("/", jsonRPCHandler) + return httpHandler +} + +func (d *Daemon) setupAdminServer(cfg *config.Config) { + var err error + adminMux := createAdminMux(d.logger, d.metricsRegistry) + d.adminListener, err = net.Listen("tcp", cfg.AdminEndpoint) + if err != nil { + d.logger.WithError(err).WithField("endpoint", cfg.AdminEndpoint).Fatal("cannot listen on admin endpoint") + } + d.adminServer = &http.Server{Handler: adminMux} //nolint:gosec +} + +func createAdminMux(logger *supportlog.Entry, metricsRegistry *prometheus.Registry) *chi.Mux { + adminMux := supporthttp.NewMux(logger) + adminMux.HandleFunc("/debug/pprof/", pprof.Index) + adminMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + adminMux.HandleFunc("/debug/pprof/profile", pprof.Profile) + adminMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + adminMux.HandleFunc("/debug/pprof/trace", pprof.Trace) + for _, profile := range runtimePprof.Profiles() { + adminMux.Handle("/debug/pprof/"+profile.Name(), pprof.Handler(profile.Name())) + } + adminMux.Handle("/metrics", promhttp.HandlerFor(metricsRegistry, promhttp.HandlerOpts{})) + return adminMux } // mustInitializeStorage initializes the storage using what was on the DB func (d *Daemon) mustInitializeStorage(cfg *config.Config) *feewindow.FeeWindows { - // - // There's some complex "ledger window math" here so we should clarify it - // beforehand. 
- // - // There are two windows in play here: - // - the ledger retention window, which describes the range of txmeta - // to keep relative to the latest "ledger tip" of the network - // - the fee stats window, which describes a *subset* of the prior - // ledger retention window on which to perform fee analysis - // - // If the fee window *exceeds* the retention window, this doesn't make any - // sense since it implies the user wants to store N amount of actual - // historical data and M > N amount of ledgers just for fee processing, - // which is nonsense from a performance standpoint. We prevent this: - maxFeeRetentionWindow := max( - cfg.ClassicFeeStatsLedgerRetentionWindow, - cfg.SorobanFeeStatsLedgerRetentionWindow) - if maxFeeRetentionWindow > cfg.HistoryRetentionWindow { - d.logger.Fatalf( - "Fee stat analysis window (%d) cannot exceed history retention window (%d).", - maxFeeRetentionWindow, cfg.HistoryRetentionWindow) - } + readTxMetaCtx, cancelReadTxMeta := context.WithTimeout(context.Background(), cfg.IngestionTimeout) + defer cancelReadTxMeta() feeWindows := feewindow.NewFeeWindows( cfg.ClassicFeeStatsLedgerRetentionWindow, @@ -323,39 +350,19 @@ func (d *Daemon) mustInitializeStorage(cfg *config.Config) *feewindow.FeeWindows d.db, ) - readTxMetaCtx, cancelReadTxMeta := context.WithTimeout(context.Background(), cfg.IngestionTimeout) - defer cancelReadTxMeta() - - // To combine these windows, we launch as follows: - // // 1. First, identify the ledger range for database migrations based on the // ledger retention window. Since we don't do "partial" migrations (all or // nothing), this represents the entire range of ledger metas we store. - // retentionRange, err := db.GetMigrationLedgerRange(readTxMetaCtx, d.db, cfg.HistoryRetentionWindow) if err != nil { d.logger.WithError(err).Fatal("could not get ledger range for migration") } - dataMigrations, err := db.BuildMigrations( - readTxMetaCtx, d.logger, d.db, cfg.NetworkPassphrase, retentionRange) - if err != nil { - d.logger.WithError(err).Fatal("could not build migrations") - } - - // 2. Then, incorporate the fee analysis window. If there are migrations to - // do, this has no effect, since migration windows are larger than the fee - // window. In the absence of migrations, though, this means the ingestion + // 2. Then, we build migrations for transactions and events, also incorporating the fee windows. + // If there are migrations to do, this has no effect, since migration windows are larger than + // the fee window. In the absence of migrations, though, this means the ingestion // range is just the fee stat range. - // - feeStatsRange, err := db.GetMigrationLedgerRange(readTxMetaCtx, d.db, maxFeeRetentionWindow) - if err != nil { - d.logger.WithError(err).Fatal("could not get ledger range for fee stats") - } - - // Additionally, by treating the fee window *as if* it's a migration, we can - // make the interface here really clean. 
- dataMigrations.Append(feeWindows.AsMigration(feeStatsRange)) + dataMigrations := d.buildMigrations(readTxMetaCtx, cfg, retentionRange, feeWindows) ledgerSeqRange := dataMigrations.ApplicableRange() // @@ -405,6 +412,44 @@ func (d *Daemon) mustInitializeStorage(cfg *config.Config) *feewindow.FeeWindows return feeWindows } +func (d *Daemon) buildMigrations(ctx context.Context, cfg *config.Config, retentionRange db.LedgerSeqRange, + feeWindows *feewindow.FeeWindows, +) db.MultiMigration { + // There are two windows in play here: + // - the ledger retention window, which describes the range of txmeta + // to keep relative to the latest "ledger tip" of the network + // - the fee stats window, which describes a *subset* of the prior + // ledger retention window on which to perform fee analysis + // + // If the fee window *exceeds* the retention window, this doesn't make any + // sense since it implies the user wants to store N amount of actual + // historical data and M > N amount of ledgers just for fee processing, + // which is nonsense from a performance standpoint. We prevent this: + maxFeeRetentionWindow := max( + cfg.ClassicFeeStatsLedgerRetentionWindow, + cfg.SorobanFeeStatsLedgerRetentionWindow) + if maxFeeRetentionWindow > cfg.HistoryRetentionWindow { + d.logger.Fatalf( + "Fee stat analysis window (%d) cannot exceed history retention window (%d).", + maxFeeRetentionWindow, cfg.HistoryRetentionWindow) + } + + dataMigrations, err := db.BuildMigrations( + ctx, d.logger, d.db, cfg.NetworkPassphrase, retentionRange) + if err != nil { + d.logger.WithError(err).Fatal("could not build migrations") + } + + feeStatsRange, err := db.GetMigrationLedgerRange(ctx, d.db, maxFeeRetentionWindow) + if err != nil { + d.logger.WithError(err).Fatal("could not get ledger range for fee stats") + } + + // By treating the fee window *as if* it's a migration, we can make the interface here clean. + dataMigrations.Append(feeWindows.AsMigration(feeStatsRange)) + return dataMigrations +} + func (d *Daemon) Run() { d.logger.WithField("addr", d.listener.Addr().String()).Info("starting HTTP server") diff --git a/cmd/soroban-rpc/internal/daemon/interfaces/noOpDaemon.go b/cmd/soroban-rpc/internal/daemon/interfaces/noOpDaemon.go index a5ba0db3..255cc4b2 100644 --- a/cmd/soroban-rpc/internal/daemon/interfaces/noOpDaemon.go +++ b/cmd/soroban-rpc/internal/daemon/interfaces/noOpDaemon.go @@ -9,7 +9,7 @@ import ( proto "github.com/stellar/go/protocols/stellarcore" ) -// The noOpDeamon is a dummy daemon implementation, supporting the Daemon interface. +// NoOpDaemon The noOpDeamon is a dummy daemon implementation, supporting the Daemon interface. // Used only in testing. 
type NoOpDaemon struct { metricsRegistry *prometheus.Registry diff --git a/cmd/soroban-rpc/internal/daemon/metrics.go b/cmd/soroban-rpc/internal/daemon/metrics.go index f283a3b7..795ad12b 100644 --- a/cmd/soroban-rpc/internal/daemon/metrics.go +++ b/cmd/soroban-rpc/internal/daemon/metrics.go @@ -62,12 +62,12 @@ func newCoreClientWithMetrics(client stellarcore.Client, registry *prometheus.Re submitMetric := prometheus.NewSummaryVec(prometheus.SummaryOpts{ Namespace: prometheusNamespace, Subsystem: "txsub", Name: "submission_duration_seconds", Help: "submission durations to Stellar-Core, sliding window = 10m", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd }, []string{"status"}) opCountMetric := prometheus.NewSummaryVec(prometheus.SummaryOpts{ Namespace: prometheusNamespace, Subsystem: "txsub", Name: "operation_count", Help: "number of operations included in a transaction, sliding window = 10m", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd }, []string{"status"}) registry.MustRegister(submitMetric, opCountMetric) @@ -78,7 +78,9 @@ func newCoreClientWithMetrics(client stellarcore.Client, registry *prometheus.Re } } -func (c *CoreClientWithMetrics) SubmitTransaction(ctx context.Context, envelopeBase64 string) (*proto.TXResponse, error) { +func (c *CoreClientWithMetrics) SubmitTransaction(ctx context.Context, + envelopeBase64 string, +) (*proto.TXResponse, error) { var envelope xdr.TransactionEnvelope err := xdr.SafeUnmarshalBase64(envelopeBase64, &envelope) if err != nil { @@ -89,15 +91,17 @@ func (c *CoreClientWithMetrics) SubmitTransaction(ctx context.Context, envelopeB response, err := c.Client.SubmitTransaction(ctx, envelopeBase64) duration := time.Since(startTime).Seconds() - var label prometheus.Labels - if err != nil { - label = prometheus.Labels{"status": "request_error"} - } else if response.IsException() { - label = prometheus.Labels{"status": "exception"} - } else { - label = prometheus.Labels{"status": response.Status} + var status string + switch { + case err != nil: + status = "request_error" + case response.IsException(): + status = "exception" + default: + status = response.Status } + label := prometheus.Labels{"status": status} c.submitMetric.With(label).Observe(duration) c.opCountMetric.With(label).Observe(float64(len(envelope.Operations()))) return response, err diff --git a/cmd/soroban-rpc/internal/db/cursor.go b/cmd/soroban-rpc/internal/db/cursor.go index 7d009df6..8393f104 100644 --- a/cmd/soroban-rpc/internal/db/cursor.go +++ b/cmd/soroban-rpc/internal/db/cursor.go @@ -10,6 +10,10 @@ import ( "github.com/stellar/go/toid" ) +const ( + numCursorParts = 2 +) + // Cursor represents the position of a Soroban event. 
// Soroban events are sorted in ascending order by // ledger sequence, transaction index, operation index, @@ -65,8 +69,8 @@ func (c *Cursor) UnmarshalJSON(b []byte) error { // ParseCursor parses the given string and returns the corresponding cursor func ParseCursor(input string) (Cursor, error) { - parts := strings.SplitN(input, "-", 2) - if len(parts) != 2 { + parts := strings.SplitN(input, "-", numCursorParts) + if len(parts) != numCursorParts { return Cursor{}, fmt.Errorf("invalid event id %s", input) } diff --git a/cmd/soroban-rpc/internal/db/cursor_test.go b/cmd/soroban-rpc/internal/db/cursor_test.go index b081a98b..ab4c788d 100644 --- a/cmd/soroban-rpc/internal/db/cursor_test.go +++ b/cmd/soroban-rpc/internal/db/cursor_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestParseCursor(t *testing.T) { @@ -30,7 +31,7 @@ func TestParseCursor(t *testing.T) { }, } { parsed, err := ParseCursor(cursor.String()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, cursor, parsed) } } @@ -51,9 +52,9 @@ func TestCursorJSON(t *testing.T) { }, 100}, } { result, err := json.Marshal(testCase) - assert.NoError(t, err) + require.NoError(t, err) var parsed options - assert.NoError(t, json.Unmarshal(result, &parsed)) + require.NoError(t, json.Unmarshal(result, &parsed)) assert.Equal(t, testCase, parsed) } } diff --git a/cmd/soroban-rpc/internal/db/db.go b/cmd/soroban-rpc/internal/db/db.go index 4f9c2560..81a297fe 100644 --- a/cmd/soroban-rpc/internal/db/db.go +++ b/cmd/soroban-rpc/internal/db/db.go @@ -1,3 +1,4 @@ +//nolint:revive package db import ( @@ -62,7 +63,8 @@ func openSQLiteDB(dbFilePath string) (*db.Session, error) { // 2. Disable WAL auto-checkpointing (we will do the checkpointing ourselves with wal_checkpoint pragmas // after every write transaction). // 3. Use synchronous=NORMAL, which is faster and still safe in WAL mode. 
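A large share of the test churn in this diff swaps `assert` helpers for `require` on error checks (for example in cursor_test.go above). The practical difference, shown in a small self-contained sketch where `parseSomething` is a hypothetical helper rather than anything from this codebase: `require` aborts the test immediately on failure, so later assertions never run against an invalid result, while `assert` records the failure and lets the test continue.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// parseSomething is a stand-in for whatever function a test exercises.
func parseSomething(s string) (string, error) {
	return "expected", nil
}

func TestRequireVsAssert(t *testing.T) {
	value, err := parseSomething("input")

	// require: a non-nil err fails the test and returns right here,
	// so the assertion below never inspects an invalid value.
	require.NoError(t, err)

	// assert: a mismatch is recorded, but execution continues.
	assert.Equal(t, "expected", value)
}
```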
- session, err := db.Open("sqlite3", fmt.Sprintf("file:%s?_journal_mode=WAL&_wal_autocheckpoint=0&_synchronous=NORMAL", dbFilePath)) + session, err := db.Open("sqlite3", + fmt.Sprintf("file:%s?_journal_mode=WAL&_wal_autocheckpoint=0&_synchronous=NORMAL", dbFilePath)) if err != nil { return nil, fmt.Errorf("open failed: %w", err) } @@ -74,7 +76,9 @@ func openSQLiteDB(dbFilePath string) (*db.Session, error) { return session, nil } -func OpenSQLiteDBWithPrometheusMetrics(dbFilePath string, namespace string, sub db.Subservice, registry *prometheus.Registry) (*DB, error) { +func OpenSQLiteDBWithPrometheusMetrics(dbFilePath string, namespace string, sub db.Subservice, + registry *prometheus.Registry, +) (*DB, error) { session, err := openSQLiteDB(dbFilePath) if err != nil { return nil, err @@ -129,7 +133,8 @@ func getMetaValue(ctx context.Context, q db.SessionInterface, key string) (strin case 1: // expected length on an initialized DB default: - return "", fmt.Errorf("multiple entries (%d) for key %q in table %q", len(results), key, metaTableName) + return "", fmt.Errorf("multiple entries (%d) for key %q in table %q", + len(results), key, metaTableName) } return results[0], nil } @@ -193,7 +198,7 @@ func NewReadWriter( Namespace: daemon.MetricsNamespace(), Subsystem: "transactions", Name: "operation_duration_seconds", Help: "transaction store operation durations, sliding window = 10m", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd }, []string{"operation"}, ) @@ -201,7 +206,7 @@ func NewReadWriter( Namespace: daemon.MetricsNamespace(), Subsystem: "transactions", Name: "count", Help: "count of transactions ingested, sliding window = 10m", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd }) daemon.MetricsRegistry().MustRegister(txDurationMetric, txCountMetric) @@ -341,11 +346,11 @@ func (w writeTx) Rollback() error { // errors.New("not in transaction") is returned when rolling back a transaction which has // already been committed or rolled back. We can ignore those errors // because we allow rolling back after commits in defer statements. 
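The `writeTx.Rollback` hunk just below flattens an if/else into an early return while still ignoring the "not in transaction" error the session reports after a commit. A standalone sketch of that pattern under assumed names (`fakeTx` and `errNotInTransaction` are illustrative, not the real db session types):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotInTransaction imitates the sentinel text the underlying session
// returns when Rollback is called after Commit (or a second Rollback).
var errNotInTransaction = errors.New("not in transaction")

type fakeTx struct{ finished bool }

func (t *fakeTx) Rollback() error {
	if t.finished {
		return errNotInTransaction
	}
	t.finished = true
	return nil
}

// rollback ignores the "already finished" case so it stays safe to call in a
// defer after Commit, using the same early-return shape as writeTx.Rollback.
func rollback(tx *fakeTx) error {
	var err error
	if err = tx.Rollback(); err == nil || err.Error() == "not in transaction" {
		return nil
	}
	return err
}

func main() {
	tx := &fakeTx{}
	fmt.Println(rollback(tx)) // <nil>
	fmt.Println(rollback(tx)) // <nil> (the second rollback is ignored)
}
```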
- if err := w.tx.Rollback(); err == nil || err.Error() == "not in transaction" { + var err error + if err = w.tx.Rollback(); err == nil || err.Error() == "not in transaction" { return nil - } else { - return err } + return err } func runSQLMigrations(db *sql.DB, dialect string) error { diff --git a/cmd/soroban-rpc/internal/db/ledgerentry.go b/cmd/soroban-rpc/internal/db/ledgerentry.go index ef704c0d..6230bdee 100644 --- a/cmd/soroban-rpc/internal/db/ledgerentry.go +++ b/cmd/soroban-rpc/internal/db/ledgerentry.go @@ -8,12 +8,14 @@ import ( "fmt" sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/support/db" "github.com/stellar/go/xdr" ) const ( - ledgerEntriesTableName = "ledger_entries" + ledgerEntriesTableName = "ledger_entries" + numEncodedLedgerEntryKeys = 2 ) type LedgerEntryReader interface { @@ -95,7 +97,7 @@ func (l ledgerEntryWriter) flush() error { } encodedEntryStr := string(encodedEntry) upsertSQL = upsertSQL.Values(key, encodedEntryStr) - upsertCount += 1 + upsertCount++ // Only cache Config entries for now if entry.Data.Type == xdr.LedgerEntryTypeConfigSetting { upsertCacheUpdates[key] = &encodedEntryStr @@ -240,9 +242,25 @@ func hasTTLKey(key xdr.LedgerKey) bool { return true case xdr.LedgerEntryTypeContractCode: return true - default: + case xdr.LedgerEntryTypeAccount: + return false + case xdr.LedgerEntryTypeTrustline: + return false + case xdr.LedgerEntryTypeOffer: + return false + case xdr.LedgerEntryTypeData: + return false + case xdr.LedgerEntryTypeClaimableBalance: + return false + case xdr.LedgerEntryTypeLiquidityPool: + return false + case xdr.LedgerEntryTypeConfigSetting: + return false + case xdr.LedgerEntryTypeTtl: + return false } - return false + // This line should never be reached if all enum values are handled + panic(fmt.Sprintf("unknown LedgerEntryType: %v", key.Type)) } func entryKeyToTTLEntryKey(key xdr.LedgerKey) (xdr.LedgerKey, error) { @@ -259,7 +277,7 @@ func entryKeyToTTLEntryKey(key xdr.LedgerKey) (xdr.LedgerKey, error) { } func (l *ledgerEntryReadTx) GetLedgerEntries(keys ...xdr.LedgerKey) ([]LedgerKeyAndEntry, error) { - encodedKeys := make([]string, 0, 2*len(keys)) + encodedKeys := make([]string, 0, numEncodedLedgerEntryKeys*len(keys)) type keyToEncoded struct { key xdr.LedgerKey encodedKey string diff --git a/cmd/soroban-rpc/internal/db/ledgerentry_test.go b/cmd/soroban-rpc/internal/db/ledgerentry_test.go index d17c560f..0bd97cbc 100644 --- a/cmd/soroban-rpc/internal/db/ledgerentry_test.go +++ b/cmd/soroban-rpc/internal/db/ledgerentry_test.go @@ -2,6 +2,7 @@ package db import ( "context" + "errors" "fmt" "math/rand" "sync" @@ -17,7 +18,9 @@ import ( "github.com/stellar/soroban-rpc/cmd/soroban-rpc/internal/daemon/interfaces" ) -func getLedgerEntryAndLatestLedgerSequenceWithErr(db *DB, key xdr.LedgerKey) (bool, xdr.LedgerEntry, uint32, *uint32, error) { +func getLedgerEntryAndLatestLedgerSequenceWithErr(db *DB, key xdr.LedgerKey) (bool, xdr.LedgerEntry, + uint32, *uint32, error, +) { tx, err := NewLedgerEntryReader(db).NewTx(context.Background(), false) if err != nil { return false, xdr.LedgerEntry{}, 0, nil, err @@ -40,12 +43,15 @@ func getLedgerEntryAndLatestLedgerSequenceWithErr(db *DB, key xdr.LedgerKey) (bo return present, entry, latestSeq, expSeq, doneErr } -func getLedgerEntryAndLatestLedgerSequence(t require.TestingT, db *DB, key xdr.LedgerKey) (bool, xdr.LedgerEntry, uint32, *uint32) { +func getLedgerEntryAndLatestLedgerSequence(t require.TestingT, db *DB, key xdr.LedgerKey) (bool, xdr.LedgerEntry, + uint32, *uint32, +) { 
present, entry, latestSeq, expSeq, err := getLedgerEntryAndLatestLedgerSequenceWithErr(db, key) require.NoError(t, err) return present, entry, latestSeq, expSeq } +//nolint:unparam func makeReadWriter(db *DB, batchSize, retentionWindow int) ReadWriter { return NewReadWriter(log.DefaultLogger, db, interfaces.MakeNoOpDeamon(), batchSize, uint32(retentionWindow), passphrase) @@ -53,18 +59,92 @@ func makeReadWriter(db *DB, batchSize, retentionWindow int) ReadWriter { func TestGoldenPath(t *testing.T) { db := NewTestDB(t) - // Check that we get an empty DB error + + t.Run("EmptyDB", func(t *testing.T) { + testEmptyDB(t, db) + }) + + t.Run("InsertEntry", func(t *testing.T) { + testInsertEntry(t, db) + }) + + t.Run("UpdateEntry", func(t *testing.T) { + testUpdateEntry(t, db) + }) + + t.Run("DeleteEntry", func(t *testing.T) { + testDeleteEntry(t, db) + }) +} + +func testEmptyDB(t *testing.T, db *DB) { _, err := NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) assert.Equal(t, ErrEmptyDB, err) +} + +func testInsertEntry(t *testing.T, db *DB) { + tx, err := makeReadWriter(db, 150, 15).NewTx(context.Background()) + require.NoError(t, err) + writer := tx.LedgerEntryWriter() + + data := createTestContractDataEntry() + key, entry := getContractDataLedgerEntry(t, data) + require.NoError(t, writer.UpsertLedgerEntry(entry)) + + expLedgerKey, err := entryKeyToTTLEntryKey(key) + require.NoError(t, err) + expLegerEntry := getTTLLedgerEntry(expLedgerKey) + require.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) + + ledgerSequence := uint32(23) + ledgerCloseMeta := createLedger(ledgerSequence) + require.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) + require.NoError(t, tx.Commit(ledgerCloseMeta)) + verifyInsertedEntry(t, db, key, ledgerSequence, expLegerEntry) +} + +func testUpdateEntry(t *testing.T, db *DB) { tx, err := makeReadWriter(db, 150, 15).NewTx(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) writer := tx.LedgerEntryWriter() - // Fill the DB with a single entry and fetch it + data := createTestContractDataEntry() + key, entry := getContractDataLedgerEntry(t, data) + eight := xdr.Uint32(8) + entry.Data.ContractData.Val.U32 = &eight + + require.NoError(t, writer.UpsertLedgerEntry(entry)) + + ledgerSequence := uint32(24) + ledgerCloseMeta := createLedger(ledgerSequence) + require.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) + require.NoError(t, tx.Commit(ledgerCloseMeta)) + + verifyUpdatedEntry(t, db, key, ledgerSequence, eight) +} + +func testDeleteEntry(t *testing.T, db *DB) { + tx, err := makeReadWriter(db, 150, 15).NewTx(context.Background()) + require.NoError(t, err) + writer := tx.LedgerEntryWriter() + + data := createTestContractDataEntry() + key, _ := getContractDataLedgerEntry(t, data) + require.NoError(t, writer.DeleteLedgerEntry(key)) + + ledgerSequence := uint32(25) + ledgerCloseMeta := createLedger(ledgerSequence) + require.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) + require.NoError(t, tx.Commit(ledgerCloseMeta)) + + verifyDeletedEntry(t, db, key, ledgerSequence) +} + +func createTestContractDataEntry() xdr.ContractDataEntry { four := xdr.Uint32(4) six := xdr.Uint32(6) - data := xdr.ContractDataEntry{ + return xdr.ContractDataEntry{ Contract: xdr.ScAddress{ Type: xdr.ScAddressTypeScAddressTypeContract, ContractId: &xdr.Hash{0xca, 0xfe}, @@ -79,71 +159,41 @@ func TestGoldenPath(t *testing.T) { U32: &six, }, } - key, entry := getContractDataLedgerEntry(t, data) - assert.NoError(t, 
writer.UpsertLedgerEntry(entry)) - - expLedgerKey, err := entryKeyToTTLEntryKey(key) - assert.NoError(t, err) - expLegerEntry := getTTLLedgerEntry(expLedgerKey) - assert.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) - - ledgerSequence := uint32(23) - ledgerCloseMeta := createLedger(ledgerSequence) - assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) - assert.NoError(t, tx.Commit(ledgerCloseMeta)) +} +func verifyInsertedEntry(t *testing.T, db *DB, key xdr.LedgerKey, ledgerSequence uint32, + expLegerEntry xdr.LedgerEntry, +) { present, obtainedEntry, obtainedLedgerSequence, liveUntilSeq := getLedgerEntryAndLatestLedgerSequence(t, db, key) assert.True(t, present) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) require.NotNil(t, liveUntilSeq) assert.Equal(t, uint32(expLegerEntry.Data.Ttl.LiveUntilLedgerSeq), *liveUntilSeq) - assert.Equal(t, obtainedEntry.Data.Type, xdr.LedgerEntryTypeContractData) + assert.Equal(t, xdr.LedgerEntryTypeContractData, obtainedEntry.Data.Type) assert.Equal(t, xdr.Hash{0xca, 0xfe}, *obtainedEntry.Data.ContractData.Contract.ContractId) - assert.Equal(t, six, *obtainedEntry.Data.ContractData.Val.U32) + assert.Equal(t, xdr.Uint32(6), *obtainedEntry.Data.ContractData.Val.U32) - obtainedLedgerSequence, err = NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) - assert.NoError(t, err) + obtainedLedgerSequence, err := NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) + require.NoError(t, err) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) +} - // Do another round, overwriting the ledger entry - tx, err = makeReadWriter(db, 150, 15).NewTx(context.Background()) - assert.NoError(t, err) - writer = tx.LedgerEntryWriter() - eight := xdr.Uint32(8) - entry.Data.ContractData.Val.U32 = &eight - - assert.NoError(t, writer.UpsertLedgerEntry(entry)) - - ledgerSequence = uint32(24) - ledgerCloseMeta = createLedger(ledgerSequence) - assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) - assert.NoError(t, tx.Commit(ledgerCloseMeta)) - - present, obtainedEntry, obtainedLedgerSequence, liveUntilSeq = getLedgerEntryAndLatestLedgerSequence(t, db, key) +func verifyUpdatedEntry(t *testing.T, db *DB, key xdr.LedgerKey, ledgerSequence uint32, expectedValue xdr.Uint32) { + present, obtainedEntry, obtainedLedgerSequence, liveUntilSeq := getLedgerEntryAndLatestLedgerSequence(t, db, key) assert.True(t, present) require.NotNil(t, liveUntilSeq) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) - assert.Equal(t, eight, *obtainedEntry.Data.ContractData.Val.U32) - - // Do another round, deleting the ledger entry - tx, err = makeReadWriter(db, 150, 15).NewTx(context.Background()) - assert.NoError(t, err) - writer = tx.LedgerEntryWriter() - assert.NoError(t, err) - - assert.NoError(t, writer.DeleteLedgerEntry(key)) - ledgerSequence = uint32(25) - ledgerCloseMeta = createLedger(ledgerSequence) - assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) - assert.NoError(t, tx.Commit(ledgerCloseMeta)) + assert.Equal(t, expectedValue, *obtainedEntry.Data.ContractData.Val.U32) +} - present, _, obtainedLedgerSequence, liveUntilSeq = getLedgerEntryAndLatestLedgerSequence(t, db, key) +func verifyDeletedEntry(t *testing.T, db *DB, key xdr.LedgerKey, ledgerSequence uint32) { + present, _, obtainedLedgerSequence, liveUntilSeq := getLedgerEntryAndLatestLedgerSequence(t, db, key) assert.False(t, present) assert.Nil(t, liveUntilSeq) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) - obtainedLedgerSequence, 
err = NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) - assert.NoError(t, err) + obtainedLedgerSequence, err := NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) + require.NoError(t, err) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) } @@ -153,7 +203,7 @@ func TestDeleteNonExistentLedgerEmpty(t *testing.T) { // Simulate a ledger which creates and deletes a ledger entry // which would result in trying to delete a ledger entry which isn't there tx, err := makeReadWriter(db, 150, 15).NewTx(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) writer := tx.LedgerEntryWriter() four := xdr.Uint32(4) @@ -174,7 +224,7 @@ func TestDeleteNonExistentLedgerEmpty(t *testing.T) { }, } key, _ := getContractDataLedgerEntry(t, data) - assert.NoError(t, writer.DeleteLedgerEntry(key)) + require.NoError(t, writer.DeleteLedgerEntry(key)) ledgerSequence := uint32(23) ledgerCloseMeta := createLedger(ledgerSequence) assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) @@ -182,7 +232,7 @@ func TestDeleteNonExistentLedgerEmpty(t *testing.T) { // Make sure that the ledger number was submitted obtainedLedgerSequence, err := NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) // And that the entry doesn't exist @@ -228,7 +278,7 @@ func TestReadTxsDuringWriteTx(t *testing.T) { // Start filling the DB with a single entry (enforce flushing right away) tx, err := makeReadWriter(db, 0, 15).NewTx(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) writer := tx.LedgerEntryWriter() four := xdr.Uint32(4) @@ -248,43 +298,42 @@ func TestReadTxsDuringWriteTx(t *testing.T) { }, } key, entry := getContractDataLedgerEntry(t, data) - assert.NoError(t, writer.UpsertLedgerEntry(entry)) + require.NoError(t, writer.UpsertLedgerEntry(entry)) expLedgerKey, err := entryKeyToTTLEntryKey(key) - assert.NoError(t, err) + require.NoError(t, err) expLegerEntry := getTTLLedgerEntry(expLedgerKey) - assert.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) + require.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) // Before committing the changes, make sure multiple concurrent transactions can query the DB readTx1, err := NewLedgerEntryReader(db).NewTx(context.Background(), false) - assert.NoError(t, err) + require.NoError(t, err) readTx2, err := NewLedgerEntryReader(db).NewTx(context.Background(), false) - assert.NoError(t, err) + require.NoError(t, err) _, err = readTx1.GetLatestLedgerSequence() assert.Equal(t, ErrEmptyDB, err) present, _, expSeq, err := GetLedgerEntry(readTx1, key) require.Nil(t, expSeq) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, present) - assert.NoError(t, readTx1.Done()) + require.NoError(t, readTx1.Done()) _, err = readTx2.GetLatestLedgerSequence() assert.Equal(t, ErrEmptyDB, err) present, _, expSeq, err = GetLedgerEntry(readTx2, key) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, present) assert.Nil(t, expSeq) - assert.NoError(t, readTx2.Done()) - + require.NoError(t, readTx2.Done()) // Finish the write transaction and check that the results are present ledgerSequence := uint32(23) ledgerCloseMeta := createLedger(ledgerSequence) - assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) - assert.NoError(t, tx.Commit(ledgerCloseMeta)) + require.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) + require.NoError(t, 
tx.Commit(ledgerCloseMeta)) obtainedLedgerSequence, err := NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) present, obtainedEntry, obtainedLedgerSequence, expSeq := getLedgerEntryAndLatestLedgerSequence(t, db, key) @@ -307,16 +356,16 @@ func TestWriteTxsDuringReadTxs(t *testing.T) { // First read transaction, before the write transaction is created readTx1, err := NewLedgerEntryReader(db).NewTx(context.Background(), false) - assert.NoError(t, err) + require.NoError(t, err) // Start filling the DB with a single entry (enforce flushing right away) tx, err := makeReadWriter(db, 0, 15).NewTx(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) writer := tx.LedgerEntryWriter() // Second read transaction, after the write transaction is created readTx2, err := NewLedgerEntryReader(db).NewTx(context.Background(), false) - assert.NoError(t, err) + require.NoError(t, err) four := xdr.Uint32(4) six := xdr.Uint32(6) @@ -336,42 +385,42 @@ func TestWriteTxsDuringReadTxs(t *testing.T) { }, } key, entry := getContractDataLedgerEntry(t, data) - assert.NoError(t, writer.UpsertLedgerEntry(entry)) + require.NoError(t, writer.UpsertLedgerEntry(entry)) expLedgerKey, err := entryKeyToTTLEntryKey(key) - assert.NoError(t, err) + require.NoError(t, err) expLegerEntry := getTTLLedgerEntry(expLedgerKey) - assert.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) + require.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) // Third read transaction, after the first insert has happened in the write transaction readTx3, err := NewLedgerEntryReader(db).NewTx(context.Background(), false) - assert.NoError(t, err) + require.NoError(t, err) // Make sure that all the read transactions get an emptyDB error before and after the write transaction is committed for _, readTx := range []LedgerEntryReadTx{readTx1, readTx2, readTx3} { _, err = readTx.GetLatestLedgerSequence() assert.Equal(t, ErrEmptyDB, err) present, _, _, err := GetLedgerEntry(readTx, key) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, present) } // commit the write transaction ledgerSequence := uint32(23) ledgerCloseMeta := createLedger(ledgerSequence) - assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) - assert.NoError(t, tx.Commit(ledgerCloseMeta)) + require.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) + require.NoError(t, tx.Commit(ledgerCloseMeta)) for _, readTx := range []LedgerEntryReadTx{readTx1, readTx2, readTx3} { present, _, _, err := GetLedgerEntry(readTx, key) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, present) } // Check that the results are present in the transactions happening after the commit obtainedLedgerSequence, err := NewLedgerEntryReader(db).GetLatestLedgerSequence(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, ledgerSequence, obtainedLedgerSequence) present, obtainedEntry, obtainedLedgerSequence, expSeq := getLedgerEntryAndLatestLedgerSequence(t, db, key) @@ -381,117 +430,145 @@ func TestWriteTxsDuringReadTxs(t *testing.T) { assert.Equal(t, six, *obtainedEntry.Data.ContractData.Val.U32) for _, readTx := range []LedgerEntryReadTx{readTx1, readTx2, readTx3} { - assert.NoError(t, readTx.Done()) + require.NoError(t, readTx.Done()) } } // Check that we can have coexisting reader and writer goroutines without deadlocks or errors func TestConcurrentReadersAndWriter(t *testing.T) { db 
:= NewTestDB(t) - contractID := xdr.Hash{0xca, 0xfe} done := make(chan struct{}) var wg sync.WaitGroup logMessageCh := make(chan string, 1) - writer := func() { - defer wg.Done() - data := func(i int) xdr.ContractDataEntry { - val := xdr.Uint32(i) - return xdr.ContractDataEntry{ - Contract: xdr.ScAddress{ - Type: xdr.ScAddressTypeScAddressTypeContract, - ContractId: &contractID, - }, - Key: xdr.ScVal{ - Type: xdr.ScValTypeScvU32, - U32: &val, - }, - Durability: xdr.ContractDataDurabilityPersistent, - Val: xdr.ScVal{ - Type: xdr.ScValTypeScvU32, - U32: &val, - }, - } - } - rw := makeReadWriter(db, 10, 15) - for ledgerSequence := uint32(0); ledgerSequence < 1000; ledgerSequence++ { - tx, err := rw.NewTx(context.Background()) - assert.NoError(t, err) - writer := tx.LedgerEntryWriter() - for i := 0; i < 200; i++ { - key, entry := getContractDataLedgerEntry(t, data(i)) - assert.NoError(t, writer.UpsertLedgerEntry(entry)) - expLedgerKey, err := entryKeyToTTLEntryKey(key) - assert.NoError(t, err) - expLegerEntry := getTTLLedgerEntry(expLedgerKey) - assert.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) - } - ledgerCloseMeta := createLedger(ledgerSequence) - assert.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) - assert.NoError(t, tx.Commit(ledgerCloseMeta)) - logMessageCh <- fmt.Sprintf("Wrote ledger %d", ledgerSequence) - time.Sleep(time.Duration(rand.Int31n(30)) * time.Millisecond) - } - close(done) + + wg.Add(1) + go writer(t, db, contractID, done, &wg, logMessageCh) + + for i := 1; i <= 32; i++ { + wg.Add(1) + go reader(t, db, contractID, i, done, &wg, logMessageCh) } - reader := func(keyVal int) { - defer wg.Done() - val := xdr.Uint32(keyVal) - key := xdr.LedgerKey{ - Type: xdr.LedgerEntryTypeContractData, - ContractData: &xdr.LedgerKeyContractData{ - Contract: xdr.ScAddress{ - Type: xdr.ScAddressTypeScAddressTypeContract, - ContractId: &contractID, - }, - Key: xdr.ScVal{ - Type: xdr.ScValTypeScvU32, - U32: &val, - }, - Durability: xdr.ContractDataDurabilityPersistent, - }, - } - for { - select { - case <-done: - return - default: - } - found, ledgerEntry, ledger, _, err := getLedgerEntryAndLatestLedgerSequenceWithErr(db, key) - if err != nil { - if err != ErrEmptyDB { - t.Fatalf("reader %d failed with error %v\n", keyVal, err) - } - } else { - // All entries should be found once the first write commit is done - assert.True(t, found) - logMessageCh <- fmt.Sprintf("reader %d: for ledger %d", keyVal, ledger) - assert.Equal(t, xdr.Uint32(keyVal), *ledgerEntry.Data.ContractData.Val.U32) - } + + monitorWorkers(t, &wg, logMessageCh) +} + +func writer(t *testing.T, db *DB, contractID xdr.Hash, done chan struct{}, + wg *sync.WaitGroup, logMessageCh chan<- string, +) { + defer wg.Done() + defer close(done) + + rw := makeReadWriter(db, 10, 15) + for ledgerSequence := range 1000 { + writeLedger(t, rw, contractID, uint32(ledgerSequence)) + logMessageCh <- fmt.Sprintf("Wrote ledger %d", ledgerSequence) + time.Sleep(time.Duration(rand.Int31n(30)) * time.Millisecond) + } +} + +func writeLedger(t *testing.T, rw ReadWriter, contractID xdr.Hash, ledgerSequence uint32) { + tx, err := rw.NewTx(context.Background()) + require.NoError(t, err) + writer := tx.LedgerEntryWriter() + + for i := range 200 { + writeEntry(t, writer, contractID, i) + } + + ledgerCloseMeta := createLedger(ledgerSequence) + require.NoError(t, tx.LedgerWriter().InsertLedger(ledgerCloseMeta)) + require.NoError(t, tx.Commit(ledgerCloseMeta)) +} + +func writeEntry(t *testing.T, writer LedgerEntryWriter, contractID xdr.Hash, i 
int) { + key, entry := getContractDataLedgerEntry(t, createContractDataEntry(contractID, i)) + require.NoError(t, writer.UpsertLedgerEntry(entry)) + + expLedgerKey, err := entryKeyToTTLEntryKey(key) + require.NoError(t, err) + expLegerEntry := getTTLLedgerEntry(expLedgerKey) + require.NoError(t, writer.UpsertLedgerEntry(expLegerEntry)) +} + +func createContractDataEntry(contractID xdr.Hash, i int) xdr.ContractDataEntry { + val := xdr.Uint32(i) + return xdr.ContractDataEntry{ + Contract: xdr.ScAddress{ + Type: xdr.ScAddressTypeScAddressTypeContract, + ContractId: &contractID, + }, + Key: xdr.ScVal{ + Type: xdr.ScValTypeScvU32, + U32: &val, + }, + Durability: xdr.ContractDataDurabilityPersistent, + Val: xdr.ScVal{ + Type: xdr.ScValTypeScvU32, + U32: &val, + }, + } +} + +func reader(t *testing.T, db *DB, contractID xdr.Hash, keyVal int, done <-chan struct{}, + wg *sync.WaitGroup, logMessageCh chan<- string, +) { + defer wg.Done() + key := createLedgerKey(contractID, keyVal) + + for { + select { + case <-done: + return + default: + readAndVerifyEntry(t, db, key, keyVal, logMessageCh) time.Sleep(time.Duration(rand.Int31n(30)) * time.Millisecond) } } +} - // one readWriter, 32 readers - wg.Add(1) - go writer() +func createLedgerKey(contractID xdr.Hash, keyVal int) xdr.LedgerKey { + val := xdr.Uint32(keyVal) + return xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeContractData, + ContractData: &xdr.LedgerKeyContractData{ + Contract: xdr.ScAddress{ + Type: xdr.ScAddressTypeScAddressTypeContract, + ContractId: &contractID, + }, + Key: xdr.ScVal{ + Type: xdr.ScValTypeScvU32, + U32: &val, + }, + Durability: xdr.ContractDataDurabilityPersistent, + }, + } +} - for i := 1; i <= 32; i++ { - wg.Add(1) - go reader(i) +func readAndVerifyEntry(t *testing.T, db *DB, key xdr.LedgerKey, keyVal int, logMessageCh chan<- string) { + found, ledgerEntry, ledger, _, err := getLedgerEntryAndLatestLedgerSequenceWithErr(db, key) + if err != nil { + if !errors.Is(err, ErrEmptyDB) { + t.Fatalf("reader %d failed with error %v\n", keyVal, err) + } + } else { + assert.True(t, found) + logMessageCh <- fmt.Sprintf("reader %d: for ledger %d", keyVal, ledger) + assert.Equal(t, xdr.Uint32(keyVal), *ledgerEntry.Data.ContractData.Val.U32) } +} +func monitorWorkers(t *testing.T, wg *sync.WaitGroup, logMessageCh <-chan string) { workersExitCh := make(chan struct{}) go func() { defer close(workersExitCh) wg.Wait() }() -forloop: for { select { case <-workersExitCh: - break forloop + return case msg := <-logMessageCh: t.Log(msg) } @@ -518,12 +595,12 @@ func benchmarkLedgerEntry(b *testing.B, cached bool) { } key, entry := getContractDataLedgerEntry(b, data) tx, err := makeReadWriter(db, 150, 15).NewTx(context.Background()) - assert.NoError(b, err) - assert.NoError(b, tx.LedgerEntryWriter().UpsertLedgerEntry(entry)) + require.NoError(b, err) + require.NoError(b, tx.LedgerEntryWriter().UpsertLedgerEntry(entry)) expLedgerKey, err := entryKeyToTTLEntryKey(key) - assert.NoError(b, err) - assert.NoError(b, tx.LedgerEntryWriter().UpsertLedgerEntry(getTTLLedgerEntry(expLedgerKey))) - assert.NoError(b, tx.Commit(createLedger(2))) + require.NoError(b, err) + require.NoError(b, tx.LedgerEntryWriter().UpsertLedgerEntry(getTTLLedgerEntry(expLedgerKey))) + require.NoError(b, tx.Commit(createLedger(2))) reader := NewLedgerEntryReader(db) const numQueriesPerOp = 15 b.ResetTimer() @@ -536,15 +613,15 @@ func benchmarkLedgerEntry(b *testing.B, cached bool) { } else { readTx, err = reader.NewTx(context.Background(), false) } - assert.NoError(b, err) - for i := 
0; i < numQueriesPerOp; i++ { + require.NoError(b, err) + for range numQueriesPerOp { b.StartTimer() found, _, _, err := GetLedgerEntry(readTx, key) b.StopTimer() - assert.NoError(b, err) + require.NoError(b, err) assert.True(b, found) } - assert.NoError(b, readTx.Done()) + require.NoError(b, readTx.Done()) } } @@ -576,12 +653,12 @@ func BenchmarkLedgerUpdate(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { tx, err := makeReadWriter(db, 150, 15).NewTx(context.Background()) - assert.NoError(b, err) + require.NoError(b, err) writer := tx.LedgerEntryWriter() - for j := 0; j < numEntriesPerOp; j++ { + for j := range numEntriesPerOp { keyUint32 = xdr.Uint32(j) - assert.NoError(b, writer.UpsertLedgerEntry(entry)) + require.NoError(b, writer.UpsertLedgerEntry(entry)) } - assert.NoError(b, tx.Commit(createLedger(uint32(i+1)))) + require.NoError(b, tx.Commit(createLedger(uint32(i+1)))) } } diff --git a/cmd/soroban-rpc/internal/feewindow/feewindow.go b/cmd/soroban-rpc/internal/feewindow/feewindow.go index 08a84ab4..24256935 100644 --- a/cmd/soroban-rpc/internal/feewindow/feewindow.go +++ b/cmd/soroban-rpc/internal/feewindow/feewindow.go @@ -152,7 +152,7 @@ func (fw *FeeWindows) IngestFees(meta xdr.LedgerCloseMeta) error { var classicFees []uint64 for { tx, err := reader.Read() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { @@ -165,7 +165,7 @@ func (fw *FeeWindows) IngestFees(meta xdr.LedgerCloseMeta) error { continue } if len(ops) == 1 { - switch ops[0].Body.Type { + switch ops[0].Body.Type { //nolint:exhaustive case xdr.OperationTypeInvokeHostFunction, xdr.OperationTypeExtendFootprintTtl, xdr.OperationTypeRestoreFootprint: if tx.UnsafeMeta.V != 3 || tx.UnsafeMeta.V3.SorobanMeta == nil || tx.UnsafeMeta.V3.SorobanMeta.Ext.V != 1 { continue diff --git a/cmd/soroban-rpc/internal/feewindow/feewindow_test.go b/cmd/soroban-rpc/internal/feewindow/feewindow_test.go index cd28bcad..708c0190 100644 --- a/cmd/soroban-rpc/internal/feewindow/feewindow_test.go +++ b/cmd/soroban-rpc/internal/feewindow/feewindow_test.go @@ -12,7 +12,7 @@ import ( ) func TestBasicComputeFeeDistribution(t *testing.T) { - for _, testCase := range []struct { + testCases := []struct { name string input []uint64 output FeeDistribution @@ -23,20 +23,9 @@ func TestBasicComputeFeeDistribution(t *testing.T) { "one", []uint64{100}, FeeDistribution{ - Max: 100, - Min: 100, - Mode: 100, - P10: 100, - P20: 100, - P30: 100, - P40: 100, - P50: 100, - P60: 100, - P70: 100, - P80: 100, - P90: 100, - P95: 100, - P99: 100, + Max: 100, Min: 100, Mode: 100, + P10: 100, P20: 100, P30: 100, P40: 100, P50: 100, + P60: 100, P70: 100, P80: 100, P90: 100, P95: 100, P99: 100, FeeCount: 1, }, }, @@ -44,20 +33,9 @@ func TestBasicComputeFeeDistribution(t *testing.T) { "even number of elements: four 100s and six 1000s", []uint64{100, 100, 100, 1000, 100, 1000, 1000, 1000, 1000, 1000}, FeeDistribution{ - Max: 1000, - Min: 100, - Mode: 1000, - P10: 100, - P20: 100, - P30: 100, - P40: 100, - P50: 1000, - P60: 1000, - P70: 1000, - P80: 1000, - P90: 1000, - P95: 1000, - P99: 1000, + Max: 1000, Min: 100, Mode: 1000, + P10: 100, P20: 100, P30: 100, P40: 100, P50: 1000, + P60: 1000, P70: 1000, P80: 1000, P90: 1000, P95: 1000, P99: 1000, FeeCount: 10, }, }, @@ -65,41 +43,19 @@ func TestBasicComputeFeeDistribution(t *testing.T) { "odd number of elements: five 100s and six 1000s", []uint64{100, 100, 100, 1000, 100, 1000, 1000, 1000, 1000, 1000, 100}, FeeDistribution{ - Max: 1000, - Min: 100, - Mode: 1000, - P10: 100, - P20: 100, - P30: 
100, - P40: 100, - P50: 1000, - P60: 1000, - P70: 1000, - P80: 1000, - P90: 1000, - P95: 1000, - P99: 1000, + Max: 1000, Min: 100, Mode: 1000, + P10: 100, P20: 100, P30: 100, P40: 100, P50: 1000, + P60: 1000, P70: 1000, P80: 1000, P90: 1000, P95: 1000, P99: 1000, FeeCount: 11, }, }, { - "mutiple modes favors the smallest value", + "multiple modes favors the smallest value", []uint64{100, 1000}, FeeDistribution{ - Max: 1000, - Min: 100, - Mode: 100, - P10: 100, - P20: 100, - P30: 100, - P40: 100, - P50: 100, - P60: 1000, - P70: 1000, - P80: 1000, - P90: 1000, - P95: 1000, - P99: 1000, + Max: 1000, Min: 100, Mode: 100, + P10: 100, P20: 100, P30: 100, P40: 100, P50: 100, + P60: 1000, P70: 1000, P80: 1000, P90: 1000, P95: 1000, P99: 1000, FeeCount: 2, }, }, @@ -107,20 +63,9 @@ func TestBasicComputeFeeDistribution(t *testing.T) { "random distribution with a repetition", []uint64{515, 245, 245, 530, 221, 262, 927}, FeeDistribution{ - Max: 927, - Min: 221, - Mode: 245, - P10: 221, - P20: 245, - P30: 245, - P40: 245, - P50: 262, - P60: 515, - P70: 515, - P80: 530, - P90: 927, - P95: 927, - P99: 927, + Max: 927, Min: 221, Mode: 245, + P10: 221, P20: 245, P30: 245, P40: 245, P50: 262, + P60: 515, P70: 515, P80: 530, P90: 927, P95: 927, P99: 927, FeeCount: 7, }, }, @@ -128,34 +73,28 @@ func TestBasicComputeFeeDistribution(t *testing.T) { "random distribution with a repetition of its largest value", []uint64{515, 245, 530, 221, 262, 927, 927}, FeeDistribution{ - Max: 927, - Min: 221, - Mode: 927, - P10: 221, - P20: 245, - P30: 262, - P40: 262, - P50: 515, - P60: 530, - P70: 530, - P80: 927, - P90: 927, - P95: 927, - P99: 927, + Max: 927, Min: 221, Mode: 927, + P10: 221, P20: 245, P30: 262, P40: 262, P50: 515, + P60: 530, P70: 530, P80: 927, P90: 927, P95: 927, P99: 927, FeeCount: 7, }, }, - } { - assert.Equal(t, computeFeeDistribution(testCase.input, 0), testCase.output, testCase.name) + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := computeFeeDistribution(tc.input, 0) + assert.Equal(t, tc.output, result) + }) } } func TestComputeFeeDistributionAgainstAlternative(t *testing.T) { - for i := 0; i < 100_000; i++ { + for range 100_000 { fees := generateFees(nil) feesCopy1 := make([]uint64, len(fees)) feesCopy2 := make([]uint64, len(fees)) - for i := 0; i < len(fees); i++ { + for i := range len(fees) { feesCopy1[i] = fees[i] feesCopy2[i] = fees[i] } @@ -176,7 +115,7 @@ func generateFees(l *int) []uint64 { } result := make([]uint64, length) lastFee := uint64(0) - for i := 0; i < length; i++ { + for i := range length { if lastFee != 0 && rand.Intn(100) <= 25 { // To test the Mode correctly, generate a repetition with a chance of 25% result[i] = lastFee @@ -193,13 +132,14 @@ func BenchmarkComputeFeeDistribution(b *testing.B) { length := 5000 fees := generateFees(&length) b.Run("computeFeeDistribution", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { computeFeeDistribution(fees, 0) } }) b.Run("alternativeComputeFeeDistribution", func(b *testing.B) { - for i := 0; i < b.N; i++ { - alternativeComputeFeeDistribution(fees, 0) + for range b.N { + _, err := alternativeComputeFeeDistribution(fees, 0) + require.NoError(b, err) } }) } @@ -208,89 +148,77 @@ func alternativeComputeFeeDistribution(fees []uint64, ledgerCount uint32) (FeeDi if len(fees) == 0 { return FeeDistribution{}, nil } + input := stats.LoadRawData(fees) - max, err := input.Max() + + max, min, mode, err := computeBasicStats(input, fees) if err != nil { return FeeDistribution{}, err } - min, 
err := input.Min() + + percentiles, err := computePercentiles(input) if err != nil { return FeeDistribution{}, err } + + return FeeDistribution{ + Max: uint64(max), + Min: uint64(min), + Mode: mode, + P10: uint64(percentiles[0]), + P20: uint64(percentiles[1]), + P30: uint64(percentiles[2]), + P40: uint64(percentiles[3]), + P50: uint64(percentiles[4]), + P60: uint64(percentiles[5]), + P70: uint64(percentiles[6]), + P80: uint64(percentiles[7]), + P90: uint64(percentiles[8]), + P95: uint64(percentiles[9]), + P99: uint64(percentiles[10]), + FeeCount: uint32(len(fees)), + LedgerCount: ledgerCount, + }, nil +} + +func computeBasicStats(input stats.Float64Data, fees []uint64) (float64, float64, uint64, error) { + max, err := input.Max() + if err != nil { + return 0, 0, 0, err + } + + min, err := input.Min() + if err != nil { + return 0, 0, 0, err + } + modeSeq, err := input.Mode() if err != nil { - return FeeDistribution{}, err + return 0, 0, 0, err } + var mode uint64 if len(modeSeq) == 0 { - // mode can have length 0 if no value is repeated more than the rest slices.Sort(fees) mode = fees[0] } else { mode = uint64(modeSeq[0]) } - p10, err := input.PercentileNearestRank(float64(10)) - if err != nil { - return FeeDistribution{}, err - } - p20, err := input.PercentileNearestRank(float64(20)) - if err != nil { - return FeeDistribution{}, err - } - p30, err := input.PercentileNearestRank(float64(30)) - if err != nil { - return FeeDistribution{}, err - } - p40, err := input.PercentileNearestRank(float64(40)) - if err != nil { - return FeeDistribution{}, err - } - p50, err := input.PercentileNearestRank(float64(50)) - if err != nil { - return FeeDistribution{}, err - } - p60, err := input.PercentileNearestRank(float64(60)) - if err != nil { - return FeeDistribution{}, err - } - p70, err := input.PercentileNearestRank(float64(70)) - if err != nil { - return FeeDistribution{}, err - } - p80, err := input.PercentileNearestRank(float64(80)) - if err != nil { - return FeeDistribution{}, err - } - p90, err := input.PercentileNearestRank(float64(90)) - if err != nil { - return FeeDistribution{}, err - } - p95, err := input.PercentileNearestRank(float64(95)) - if err != nil { - return FeeDistribution{}, err - } - p99, err := input.PercentileNearestRank(float64(99)) - if err != nil { - return FeeDistribution{}, err - } - result := FeeDistribution{ - Max: uint64(max), - Min: uint64(min), - Mode: mode, - P10: uint64(p10), - P20: uint64(p20), - P30: uint64(p30), - P40: uint64(p40), - P50: uint64(p50), - P60: uint64(p60), - P70: uint64(p70), - P80: uint64(p80), - P90: uint64(p90), - P95: uint64(p95), - P99: uint64(p99), - FeeCount: uint32(len(fees)), - LedgerCount: ledgerCount, + return max, min, mode, nil +} + +func computePercentiles(input stats.Float64Data) ([]float64, error) { + percentiles := []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99} + results := make([]float64, len(percentiles)) + + for i, p := range percentiles { + result, err := input.PercentileNearestRank(p) + if err != nil { + return nil, err + } + results[i] = result } - return result, nil + + return results, nil } diff --git a/cmd/soroban-rpc/internal/ingest/ledgerentry.go b/cmd/soroban-rpc/internal/ingest/ledgerentry.go index d9fdc35c..49b1406d 100644 --- a/cmd/soroban-rpc/internal/ingest/ledgerentry.go +++ b/cmd/soroban-rpc/internal/ingest/ledgerentry.go @@ -2,7 +2,10 @@ package ingest import ( "context" + "errors" + "fmt" "io" + "reflect" "strings" "time" @@ -14,14 +17,16 @@ import ( 
"github.com/stellar/soroban-rpc/cmd/soroban-rpc/internal/db" ) -func (s *Service) ingestLedgerEntryChanges(ctx context.Context, reader ingest.ChangeReader, tx db.WriteTx, progressLogPeriod int) error { +func (s *Service) ingestLedgerEntryChanges(ctx context.Context, reader ingest.ChangeReader, + tx db.WriteTx, progressLogPeriod int, +) error { entryCount := 0 startTime := time.Now() writer := tx.LedgerEntryWriter() changeStatsProcessor := ingest.StatsChangeProcessor{} for ctx.Err() == nil { - if change, err := reader.Read(); err == io.EOF { + if change, err := reader.Read(); errors.Is(err, io.EOF) { return nil } else if err != nil { return err @@ -39,8 +44,14 @@ func (s *Service) ingestLedgerEntryChanges(ctx context.Context, reader ingest.Ch results := changeStatsProcessor.GetResults() for stat, value := range results.Map() { stat = strings.Replace(stat, "stats_", "change_", 1) - s.metrics.ledgerStatsMetric. - With(prometheus.Labels{"type": stat}).Add(float64(value.(int64))) + if intValue, ok := value.(int64); ok { + s.metrics.ledgerStatsMetric. + With(prometheus.Labels{"type": stat}).Add(float64(intValue)) + } else { + // Handle the case where the type assertion failed + return fmt.Errorf("unexpected type for ledger stats metric. Expected int64, "+ + "got %s", reflect.TypeOf(value)) + } } s.metrics.ingestionDurationMetric. With(prometheus.Labels{"type": "ledger_entries"}).Observe(time.Since(startTime).Seconds()) @@ -82,7 +93,6 @@ func ingestLedgerEntryChange(writer db.LedgerEntryWriter, change ingest.Change) return err } return writer.DeleteLedgerEntry(ledgerKey) - } else { - return writer.UpsertLedgerEntry(*change.Post) } + return writer.UpsertLedgerEntry(*change.Post) } diff --git a/cmd/soroban-rpc/internal/ingest/mock_db_test.go b/cmd/soroban-rpc/internal/ingest/mock_db_test.go index 7fdaf4cd..981deafc 100644 --- a/cmd/soroban-rpc/internal/ingest/mock_db_test.go +++ b/cmd/soroban-rpc/internal/ingest/mock_db_test.go @@ -24,12 +24,12 @@ type MockDB struct { func (m *MockDB) NewTx(ctx context.Context) (db.WriteTx, error) { args := m.Called(ctx) - return args.Get(0).(db.WriteTx), args.Error(1) + return args.Get(0).(db.WriteTx), args.Error(1) //nolint:forcetypeassert } func (m *MockDB) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { args := m.Called(ctx) - return args.Get(0).(uint32), args.Error(1) + return args.Get(0).(uint32), args.Error(1) //nolint:forcetypeassert } type MockTx struct { @@ -47,17 +47,17 @@ func (m *MockTx) EventWriter() db.EventWriter { func (m *MockTx) LedgerEntryWriter() db.LedgerEntryWriter { args := m.Called() - return args.Get(0).(db.LedgerEntryWriter) + return args.Get(0).(db.LedgerEntryWriter) //nolint:forcetypeassert } func (m *MockTx) LedgerWriter() db.LedgerWriter { args := m.Called() - return args.Get(0).(db.LedgerWriter) + return args.Get(0).(db.LedgerWriter) //nolint:forcetypeassert } func (m *MockTx) TransactionWriter() db.TransactionWriter { args := m.Called() - return args.Get(0).(db.TransactionWriter) + return args.Get(0).(db.TransactionWriter) //nolint:forcetypeassert } func (m *MockTx) Commit(ledgerCloseMeta xdr.LedgerCloseMeta) error { diff --git a/cmd/soroban-rpc/internal/ingest/service.go b/cmd/soroban-rpc/internal/ingest/service.go index 987757f1..92eb74d1 100644 --- a/cmd/soroban-rpc/internal/ingest/service.go +++ b/cmd/soroban-rpc/internal/ingest/service.go @@ -3,7 +3,6 @@ package ingest import ( "context" "errors" - "fmt" "sync" "time" @@ -25,9 +24,11 @@ import ( const ( ledgerEntryBaselineProgressLogPeriod = 10000 + maxRetries = 
5
 )
 
-var errEmptyArchives = fmt.Errorf("cannot start ingestion without history archives, wait until first history archives are published")
+var errEmptyArchives = errors.New("cannot start ingestion without history archives, " +
+	"wait until first history archives are published")
 
 type Config struct {
 	Logger *log.Entry
@@ -52,7 +53,7 @@ func newService(cfg Config) *Service {
 	ingestionDurationMetric := prometheus.NewSummaryVec(prometheus.SummaryOpts{
 		Namespace: cfg.Daemon.MetricsNamespace(), Subsystem: "ingest", Name: "ledger_ingestion_duration_seconds",
 		Help: "ledger ingestion durations, sliding window = 10m",
-		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd
 	},
 		[]string{"type"},
 	)
@@ -101,7 +102,7 @@ func startService(service *Service, cfg Config) {
 	panicGroup.Go(func() {
 		defer service.wg.Done()
 		// Retry running ingestion every second for 5 seconds.
-		constantBackoff := backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), 5)
+		constantBackoff := backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), maxRetries)
 		// Don't want to keep retrying if the context gets canceled.
 		contextBackoff := backoff.WithContext(constantBackoff, ctx)
 		err := backoff.RetryNotify(
@@ -165,23 +166,27 @@ func (s *Service) run(ctx context.Context, archive historyarchive.ArchiveInterfa
 	}
 }
 
-func (s *Service) maybeFillEntriesFromCheckpoint(ctx context.Context, archive historyarchive.ArchiveInterface) (uint32, chan error, error) {
+func (s *Service) maybeFillEntriesFromCheckpoint(ctx context.Context,
+	archive historyarchive.ArchiveInterface,
+) (uint32, chan error, error) {
 	checkPointFillErr := make(chan error, 1)
 	// Skip creating a ledger-entry baseline if the DB was initialized
 	curLedgerSeq, err := s.db.GetLatestLedgerSequence(ctx)
-	if err == db.ErrEmptyDB {
+	if errors.Is(err, db.ErrEmptyDB) {
 		var checkpointLedger uint32
-		if root, rootErr := archive.GetRootHAS(); rootErr != nil {
+		root, rootErr := archive.GetRootHAS()
+		if rootErr != nil {
 			return 0, checkPointFillErr, rootErr
-		} else if root.CurrentLedger == 0 {
+		}
+		if root.CurrentLedger == 0 {
 			return 0, checkPointFillErr, errEmptyArchives
-		} else {
-			checkpointLedger = root.CurrentLedger
 		}
+		checkpointLedger = root.CurrentLedger
 		// DB is empty, let's fill it from the History Archive, using the latest available checkpoint
 		// Do it in parallel with the upcoming captive core preparation to save time
-		s.logger.Infof("found an empty database, creating ledger-entry baseline from the most recent checkpoint (%d). This can take up to 30 minutes, depending on the network", checkpointLedger)
+		s.logger.Infof("found an empty database, creating ledger-entry baseline from the most recent "+
+			"checkpoint (%d). 
This can take up to 30 minutes, depending on the network", checkpointLedger) panicGroup := util.UnrecoverablePanicGroup.Log(s.logger) panicGroup.Go(func() { checkPointFillErr <- s.fillEntriesFromCheckpoint(ctx, archive, checkpointLedger) @@ -189,18 +194,19 @@ func (s *Service) maybeFillEntriesFromCheckpoint(ctx context.Context, archive hi return checkpointLedger + 1, checkPointFillErr, nil } else if err != nil { return 0, checkPointFillErr, err - } else { - checkPointFillErr <- nil - nextLedgerSeq := curLedgerSeq + 1 - prepareRangeCtx, cancelPrepareRange := context.WithTimeout(ctx, s.timeout) - defer cancelPrepareRange() - return nextLedgerSeq, - checkPointFillErr, - s.ledgerBackend.PrepareRange(prepareRangeCtx, backends.UnboundedRange(nextLedgerSeq)) } + checkPointFillErr <- nil + nextLedgerSeq := curLedgerSeq + 1 + prepareRangeCtx, cancelPrepareRange := context.WithTimeout(ctx, s.timeout) + defer cancelPrepareRange() + return nextLedgerSeq, + checkPointFillErr, + s.ledgerBackend.PrepareRange(prepareRangeCtx, backends.UnboundedRange(nextLedgerSeq)) } -func (s *Service) fillEntriesFromCheckpoint(ctx context.Context, archive historyarchive.ArchiveInterface, checkpointLedger uint32) error { +func (s *Service) fillEntriesFromCheckpoint(ctx context.Context, archive historyarchive.ArchiveInterface, + checkpointLedger uint32, +) error { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, s.timeout) defer cancel() @@ -225,7 +231,7 @@ func (s *Service) fillEntriesFromCheckpoint(ctx context.Context, archive history if !transactionCommitted { // Internally, we might already have rolled back the transaction. We should // not generate benign error/warning here in case the transaction was already rolled back. - if rollbackErr := tx.Rollback(); rollbackErr != nil && rollbackErr != supportdb.ErrAlreadyRolledback { + if rollbackErr := tx.Rollback(); rollbackErr != nil && !errors.Is(rollbackErr, supportdb.ErrAlreadyRolledback) { s.logger.WithError(rollbackErr).Warn("could not rollback fillEntriesFromCheckpoint write transactions") } } diff --git a/cmd/soroban-rpc/internal/ingest/service_test.go b/cmd/soroban-rpc/internal/ingest/service_test.go index 92b7753b..566616a2 100644 --- a/cmd/soroban-rpc/internal/ingest/service_test.go +++ b/cmd/soroban-rpc/internal/ingest/service_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/network" @@ -22,11 +23,11 @@ import ( type ErrorReadWriter struct{} -func (rw *ErrorReadWriter) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { +func (rw *ErrorReadWriter) GetLatestLedgerSequence(_ context.Context) (uint32, error) { return 0, errors.New("could not get latest ledger sequence") } -func (rw *ErrorReadWriter) NewTx(ctx context.Context) (db.WriteTx, error) { +func (rw *ErrorReadWriter) NewTx(_ context.Context) (db.WriteTx, error) { return nil, errors.New("could not create new tx") } @@ -36,7 +37,7 @@ func TestRetryRunningIngestion(t *testing.T) { numRetries := 0 var lastErr error - incrementRetry := func(err error, dur time.Duration) { + incrementRetry := func(err error, _ time.Duration) { defer retryWg.Done() numRetries++ lastErr = err @@ -55,14 +56,32 @@ func TestRetryRunningIngestion(t *testing.T) { retryWg.Wait() service.Close() assert.Equal(t, 1, numRetries) - assert.Error(t, lastErr) - assert.ErrorContains(t, lastErr, "could not get latest ledger sequence") + require.Error(t, lastErr) + 
require.ErrorContains(t, lastErr, "could not get latest ledger sequence") } func TestIngestion(t *testing.T) { + ctx := context.Background() + mockDB, mockLedgerBackend, mockTx := setupMocks() + service := setupService(mockDB, mockLedgerBackend) + sequence := uint32(3) + + ledger := createTestLedger(t) + setupMockExpectations(ctx, t, mockDB, mockLedgerBackend, mockTx, ledger, sequence) + + require.NoError(t, service.ingest(ctx, sequence)) + + assertMockExpectations(t, mockDB, mockTx, mockLedgerBackend) +} + +func setupMocks() (*MockDB, *ledgerbackend.MockDatabaseBackend, *MockTx) { mockDB := &MockDB{} mockLedgerBackend := &ledgerbackend.MockDatabaseBackend{} + mockTx := &MockTx{} + return mockDB, mockLedgerBackend, mockTx +} +func setupService(mockDB *MockDB, mockLedgerBackend *ledgerbackend.MockDatabaseBackend) *Service { daemon := interfaces.MakeNoOpDeamon() config := Config{ Logger: supportlog.New(), @@ -72,17 +91,54 @@ func TestIngestion(t *testing.T) { Daemon: daemon, NetworkPassPhrase: network.TestNetworkPassphrase, } - sequence := uint32(3) - service := newService(config) - mockTx := &MockTx{} - mockLedgerEntryWriter := &MockLedgerEntryWriter{} - mockLedgerWriter := &MockLedgerWriter{} - mockTxWriter := &MockTransactionWriter{} - mockEventWriter := &MockEventWriter{} - ctx := context.Background() + return newService(config) +} + +func createTestLedger(t *testing.T) xdr.LedgerCloseMeta { + return xdr.LedgerCloseMeta{ + V: 1, + V1: &xdr.LedgerCloseMetaV1{ + LedgerHeader: createLedgerHeader(), + TxSet: createTransactionSet(), + TxProcessing: createTransactionProcessing(t), + UpgradesProcessing: []xdr.UpgradeEntryMeta{}, + EvictedTemporaryLedgerKeys: []xdr.LedgerKey{createEvictedTempLedgerKey()}, + EvictedPersistentLedgerEntries: []xdr.LedgerEntry{createEvictedPersistentLedgerEntry()}, + }, + } +} +func createLedgerHeader() xdr.LedgerHeaderHistoryEntry { + return xdr.LedgerHeaderHistoryEntry{Header: xdr.LedgerHeader{LedgerVersion: 10}} +} + +func createTransactionSet() xdr.GeneralizedTransactionSet { + firstTx := createFirstTransaction() + baseFee := xdr.Int64(100) + return xdr.GeneralizedTransactionSet{ + V: 1, + V1TxSet: &xdr.TransactionSetV1{ + PreviousLedgerHash: xdr.Hash{1, 2, 3}, + Phases: []xdr.TransactionPhase{ + { + V0Components: &[]xdr.TxSetComponent{ + { + Type: xdr.TxSetComponentTypeTxsetCompTxsMaybeDiscountedFee, + TxsMaybeDiscountedFee: &xdr.TxSetComponentTxsMaybeDiscountedFee{ + BaseFee: &baseFee, + Txs: []xdr.TransactionEnvelope{firstTx}, + }, + }, + }, + }, + }, + }, + } +} + +func createFirstTransaction() xdr.TransactionEnvelope { src := xdr.MustAddress("GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON") - firstTx := xdr.TransactionEnvelope{ + return xdr.TransactionEnvelope{ Type: xdr.EnvelopeTypeEnvelopeTypeTx, V1: &xdr.TransactionV1Envelope{ Tx: xdr.Transaction{ @@ -91,150 +147,135 @@ func TestIngestion(t *testing.T) { }, }, } +} + +func createTransactionProcessing(t *testing.T) []xdr.TransactionResultMeta { + firstTx := createFirstTransaction() firstTxHash, err := network.HashTransactionInEnvelope(firstTx, network.TestNetworkPassphrase) - assert.NoError(t, err) + require.NoError(t, err) - baseFee := xdr.Int64(100) - tempKey := xdr.ScSymbol("TEMPKEY") + return []xdr.TransactionResultMeta{ + { + Result: xdr.TransactionResultPair{ + TransactionHash: firstTxHash, + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Results: &[]xdr.OperationResult{}, + }, + }, + }, + FeeProcessing: xdr.LedgerEntryChanges{}, + TxApplyProcessing: 
xdr.TransactionMeta{ + V: 3, + V3: &xdr.TransactionMetaV3{ + Operations: []xdr.OperationMeta{ + { + Changes: createOperationChanges(), + }, + }, + }, + }, + }, + } +} + +func createOperationChanges() xdr.LedgerEntryChanges { + contractAddress := createContractAddress() persistentKey := xdr.ScSymbol("TEMPVAL") - contractIDBytes, err := hex.DecodeString("df06d62447fd25da07c0135eed7557e5a5497ee7d15b7fe345bd47e191d8f577") - assert.NoError(t, err) + + return xdr.LedgerEntryChanges{ + createLedgerEntryState(contractAddress, persistentKey, true), + createLedgerEntryUpdated(contractAddress, persistentKey, true), + } +} + +func createContractAddress() xdr.ScAddress { + contractIDBytes, _ := hex.DecodeString("df06d62447fd25da07c0135eed7557e5a5497ee7d15b7fe345bd47e191d8f577") var contractID xdr.Hash copy(contractID[:], contractIDBytes) - contractAddress := xdr.ScAddress{ + return xdr.ScAddress{ Type: xdr.ScAddressTypeScAddressTypeContract, ContractId: &contractID, } - xdrTrue := true - operationChanges := xdr.LedgerEntryChanges{ - { - Type: xdr.LedgerEntryChangeTypeLedgerEntryState, - State: &xdr.LedgerEntry{ - LastModifiedLedgerSeq: 1, - Data: xdr.LedgerEntryData{ - Type: xdr.LedgerEntryTypeContractData, - ContractData: &xdr.ContractDataEntry{ - Contract: contractAddress, - Key: xdr.ScVal{ - Type: xdr.ScValTypeScvSymbol, - Sym: &persistentKey, - }, - Durability: xdr.ContractDataDurabilityPersistent, - Val: xdr.ScVal{ - Type: xdr.ScValTypeScvBool, - B: &xdrTrue, - }, - }, +} + +func createLedgerEntryState(contractAddress xdr.ScAddress, key xdr.ScSymbol, value bool) xdr.LedgerEntryChange { + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeContractData, + ContractData: &xdr.ContractDataEntry{ + Contract: contractAddress, + Key: xdr.ScVal{Type: xdr.ScValTypeScvSymbol, Sym: &key}, + Durability: xdr.ContractDataDurabilityPersistent, + Val: xdr.ScVal{Type: xdr.ScValTypeScvBool, B: &value}, }, }, }, - { - Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, - Updated: &xdr.LedgerEntry{ - LastModifiedLedgerSeq: 1, - Data: xdr.LedgerEntryData{ - Type: xdr.LedgerEntryTypeContractData, - ContractData: &xdr.ContractDataEntry{ - Contract: xdr.ScAddress{ - Type: xdr.ScAddressTypeScAddressTypeContract, - ContractId: &contractID, - }, - Key: xdr.ScVal{ - Type: xdr.ScValTypeScvSymbol, - Sym: &persistentKey, - }, - Durability: xdr.ContractDataDurabilityPersistent, - Val: xdr.ScVal{ - Type: xdr.ScValTypeScvBool, - B: &xdrTrue, - }, - }, + } +} + +func createLedgerEntryUpdated(contractAddress xdr.ScAddress, key xdr.ScSymbol, value bool) xdr.LedgerEntryChange { + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeContractData, + ContractData: &xdr.ContractDataEntry{ + Contract: contractAddress, + Key: xdr.ScVal{Type: xdr.ScValTypeScvSymbol, Sym: &key}, + Durability: xdr.ContractDataDurabilityPersistent, + Val: xdr.ScVal{Type: xdr.ScValTypeScvBool, B: &value}, }, }, }, } - evictedPersistentLedgerEntry := xdr.LedgerEntry{ +} + +func createEvictedPersistentLedgerEntry() xdr.LedgerEntry { + contractAddress := createContractAddress() + persistentKey := xdr.ScSymbol("TEMPVAL") + xdrTrue := true + + return xdr.LedgerEntry{ LastModifiedLedgerSeq: 123, Data: xdr.LedgerEntryData{ Type: xdr.LedgerEntryTypeContractData, ContractData: 
&xdr.ContractDataEntry{ - Contract: contractAddress, - Key: xdr.ScVal{ - Type: xdr.ScValTypeScvSymbol, - Sym: &persistentKey, - }, + Contract: contractAddress, + Key: xdr.ScVal{Type: xdr.ScValTypeScvSymbol, Sym: &persistentKey}, Durability: xdr.ContractDataDurabilityTemporary, - Val: xdr.ScVal{ - Type: xdr.ScValTypeScvBool, - B: &xdrTrue, - }, + Val: xdr.ScVal{Type: xdr.ScValTypeScvBool, B: &xdrTrue}, }, }, } - evictedTempLedgerKey := xdr.LedgerKey{ +} + +func createEvictedTempLedgerKey() xdr.LedgerKey { + contractAddress := createContractAddress() + tempKey := xdr.ScSymbol("TEMPKEY") + + return xdr.LedgerKey{ Type: xdr.LedgerEntryTypeContractData, ContractData: &xdr.LedgerKeyContractData{ - Contract: contractAddress, - Key: xdr.ScVal{ - Type: xdr.ScValTypeScvSymbol, - Sym: &tempKey, - }, + Contract: contractAddress, + Key: xdr.ScVal{Type: xdr.ScValTypeScvSymbol, Sym: &tempKey}, Durability: xdr.ContractDataDurabilityTemporary, }, } - ledger := xdr.LedgerCloseMeta{ - V: 1, - V1: &xdr.LedgerCloseMetaV1{ - LedgerHeader: xdr.LedgerHeaderHistoryEntry{Header: xdr.LedgerHeader{LedgerVersion: 10}}, - TxSet: xdr.GeneralizedTransactionSet{ - V: 1, - V1TxSet: &xdr.TransactionSetV1{ - PreviousLedgerHash: xdr.Hash{1, 2, 3}, - Phases: []xdr.TransactionPhase{ - { - V0Components: &[]xdr.TxSetComponent{ - { - Type: xdr.TxSetComponentTypeTxsetCompTxsMaybeDiscountedFee, - TxsMaybeDiscountedFee: &xdr.TxSetComponentTxsMaybeDiscountedFee{ - BaseFee: &baseFee, - Txs: []xdr.TransactionEnvelope{ - firstTx, - }, - }, - }, - }, - }, - }, - }, - }, - TxProcessing: []xdr.TransactionResultMeta{ - { - Result: xdr.TransactionResultPair{ - TransactionHash: firstTxHash, - Result: xdr.TransactionResult{ - Result: xdr.TransactionResultResult{ - Results: &[]xdr.OperationResult{}, - }, - }, - }, - FeeProcessing: xdr.LedgerEntryChanges{}, - TxApplyProcessing: xdr.TransactionMeta{ - V: 3, - V3: &xdr.TransactionMetaV3{ - Operations: []xdr.OperationMeta{ - { - Changes: operationChanges, - }, - }, - }, - }, - }, - }, - UpgradesProcessing: []xdr.UpgradeEntryMeta{}, - EvictedTemporaryLedgerKeys: []xdr.LedgerKey{evictedTempLedgerKey}, - EvictedPersistentLedgerEntries: []xdr.LedgerEntry{evictedPersistentLedgerEntry}, - }, - } +} + +func setupMockExpectations(ctx context.Context, t *testing.T, mockDB *MockDB, + mockLedgerBackend *ledgerbackend.MockDatabaseBackend, mockTx *MockTx, ledger xdr.LedgerCloseMeta, sequence uint32, +) { + mockLedgerEntryWriter := &MockLedgerEntryWriter{} + mockLedgerWriter := &MockLedgerWriter{} + mockTxWriter := &MockTransactionWriter{} + mockEventWriter := &MockEventWriter{} + mockDB.On("NewTx", ctx).Return(mockTx, nil).Once() mockTx.On("Commit", ledger).Return(nil).Once() mockTx.On("Rollback").Return(nil).Once() @@ -244,22 +285,35 @@ func TestIngestion(t *testing.T) { mockTx.On("EventWriter").Return(mockEventWriter).Once() mockLedgerBackend.On("GetLedger", ctx, sequence).Return(ledger, nil).Once() + + setupLedgerEntryWriterExpectations(t, mockLedgerEntryWriter, ledger) + mockLedgerWriter.On("InsertLedger", ledger).Return(nil).Once() + mockTxWriter.On("InsertTransactions", ledger).Return(nil).Once() + mockEventWriter.On("InsertEvents", ledger).Return(nil).Once() +} + +func setupLedgerEntryWriterExpectations(t *testing.T, mockLedgerEntryWriter *MockLedgerEntryWriter, + ledger xdr.LedgerCloseMeta, +) { + operationChanges := ledger.V1.TxProcessing[0].TxApplyProcessing.V3.Operations[0].Changes mockLedgerEntryWriter.On("UpsertLedgerEntry", operationChanges[1].MustUpdated()). 
Return(nil).Once() - evictedPresistentLedgerKey, err := evictedPersistentLedgerEntry.LedgerKey() - assert.NoError(t, err) - mockLedgerEntryWriter.On("DeleteLedgerEntry", evictedPresistentLedgerKey). + + evictedPersistentLedgerEntry := ledger.V1.EvictedPersistentLedgerEntries[0] + evictedPersistentLedgerKey, err := evictedPersistentLedgerEntry.LedgerKey() + require.NoError(t, err) + mockLedgerEntryWriter.On("DeleteLedgerEntry", evictedPersistentLedgerKey). Return(nil).Once() + + evictedTempLedgerKey := ledger.V1.EvictedTemporaryLedgerKeys[0] mockLedgerEntryWriter.On("DeleteLedgerEntry", evictedTempLedgerKey). Return(nil).Once() - mockLedgerWriter.On("InsertLedger", ledger).Return(nil).Once() - mockTxWriter.On("InsertTransactions", ledger).Return(nil).Once() - mockEventWriter.On("InsertEvents", ledger).Return(nil).Once() - assert.NoError(t, service.ingest(ctx, sequence)) +} +func assertMockExpectations(t *testing.T, mockDB *MockDB, mockTx *MockTx, + mockLedgerBackend *ledgerbackend.MockDatabaseBackend, +) { mockDB.AssertExpectations(t) mockTx.AssertExpectations(t) - mockLedgerEntryWriter.AssertExpectations(t) - mockLedgerWriter.AssertExpectations(t) mockLedgerBackend.AssertExpectations(t) } diff --git a/cmd/soroban-rpc/internal/integrationtest/metrics_test.go b/cmd/soroban-rpc/internal/integrationtest/metrics_test.go index 930a6ed8..59c148ab 100644 --- a/cmd/soroban-rpc/internal/integrationtest/metrics_test.go +++ b/cmd/soroban-rpc/internal/integrationtest/metrics_test.go @@ -8,12 +8,11 @@ import ( "runtime" "testing" + "github.com/pkg/errors" io_prometheus_client "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stellar/go/support/errors" - "github.com/stellar/soroban-rpc/cmd/soroban-rpc/internal/config" "github.com/stellar/soroban-rpc/cmd/soroban-rpc/internal/integrationtest/infrastructure" ) diff --git a/cmd/soroban-rpc/internal/ledgerbucketwindow/ledgerbucketwindow_test.go b/cmd/soroban-rpc/internal/ledgerbucketwindow/ledgerbucketwindow_test.go index 2e50ed6d..3423350f 100644 --- a/cmd/soroban-rpc/internal/ledgerbucketwindow/ledgerbucketwindow_test.go +++ b/cmd/soroban-rpc/internal/ledgerbucketwindow/ledgerbucketwindow_test.go @@ -98,7 +98,8 @@ func TestAppendError(t *testing.T) { require.NoError(t, err) require.Nil(t, evicted) - evicted, err = m.Append(bucket(1)) + _, err = m.Append(bucket(1)) require.Error(t, err) - require.Contains(t, err.Error(), "error appending ledgers: ledgers not contiguous: expected ledger sequence 6 but received 1") + require.Contains(t, err.Error(), "error appending ledgers: ledgers not contiguous: "+ + "expected ledger sequence 6 but received 1") } diff --git a/cmd/soroban-rpc/internal/methods/get_events.go b/cmd/soroban-rpc/internal/methods/get_events.go index 8cf445a8..a109d6b0 100644 --- a/cmd/soroban-rpc/internal/methods/get_events.go +++ b/cmd/soroban-rpc/internal/methods/get_events.go @@ -9,10 +9,10 @@ import ( "time" "github.com/creachadair/jrpc2" + "github.com/pkg/errors" "github.com/stellar/go/strkey" "github.com/stellar/go/support/collections/set" - "github.com/stellar/go/support/errors" "github.com/stellar/go/support/log" "github.com/stellar/go/xdr" @@ -81,7 +81,7 @@ func (e eventTypeSet) matches(event xdr.ContractEvent) bool { if len(e) == 0 { return true } - _, ok := e[eventTypeFromXDR[event.Type]] + _, ok := e[getEventTypeFromEventTypeXDR()[event.Type]] return ok } @@ -122,7 +122,7 @@ func (g *GetEventsRequest) Valid(maxLimit uint) error { // Validate the paging limit 
(if it exists) if g.Pagination != nil && g.Pagination.Cursor != nil { if g.StartLedger != 0 || g.EndLedger != 0 { - return errors.New("ledger ranges and cursor cannot both be set") //nolint:forbidigo + return errors.New("ledger ranges and cursor cannot both be set") } } else if g.StartLedger <= 0 { return errors.New("startLedger must be positive") @@ -163,10 +163,12 @@ const ( EventTypeDiagnostic = "diagnostic" ) -var eventTypeFromXDR = map[xdr.ContractEventType]string{ - xdr.ContractEventTypeSystem: EventTypeSystem, - xdr.ContractEventTypeContract: EventTypeContract, - xdr.ContractEventTypeDiagnostic: EventTypeDiagnostic, +func getEventTypeFromEventTypeXDR() map[xdr.ContractEventType]string { + return map[xdr.ContractEventType]string{ + xdr.ContractEventTypeSystem: EventTypeSystem, + xdr.ContractEventTypeContract: EventTypeContract, + xdr.ContractEventTypeDiagnostic: EventTypeDiagnostic, + } } func getEventTypeXDRFromEventType() map[string]xdr.ContractEventType { @@ -543,7 +545,7 @@ func eventInfoForEvent( return EventInfo{}, errors.New("unknown event version") } - eventType, ok := eventTypeFromXDR[event.Event.Type] + eventType, ok := getEventTypeFromEventTypeXDR()[event.Event.Type] if !ok { return EventInfo{}, fmt.Errorf("unknown XDR ContractEventType type: %d", event.Event.Type) } @@ -620,7 +622,5 @@ func NewGetEventsHandler( logger: logger, ledgerReader: ledgerReader, } - return NewHandler(func(ctx context.Context, request GetEventsRequest) (GetEventsResponse, error) { - return eventsHandler.getEvents(ctx, request) - }) + return NewHandler(eventsHandler.getEvents) } diff --git a/cmd/soroban-rpc/internal/methods/get_events_test.go b/cmd/soroban-rpc/internal/methods/get_events_test.go index efef6381..e125eb37 100644 --- a/cmd/soroban-rpc/internal/methods/get_events_test.go +++ b/cmd/soroban-rpc/internal/methods/get_events_test.go @@ -159,7 +159,7 @@ func TestEventTypeSetValid(t *testing.T) { if testCase.expectedError { assert.Error(t, set.valid()) } else { - assert.NoError(t, set.valid()) + require.NoError(t, set.valid()) } }) } @@ -190,9 +190,9 @@ func TestEventTypeSetMarshaling(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { var set eventTypeSet input, err := json.Marshal(testCase.input) - assert.NoError(t, err) + require.NoError(t, err) err = set.UnmarshalJSON(input) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, len(testCase.expected), len(set)) for _, val := range testCase.expected { _, ok := set[val] @@ -370,18 +370,18 @@ func TestTopicFilterMatches(t *testing.T) { func TestTopicFilterJSON(t *testing.T) { var got TopicFilter - assert.NoError(t, json.Unmarshal([]byte("[]"), &got)) + require.NoError(t, json.Unmarshal([]byte("[]"), &got)) assert.Equal(t, TopicFilter{}, got) star := "*" - assert.NoError(t, json.Unmarshal([]byte("[\"*\"]"), &got)) + require.NoError(t, json.Unmarshal([]byte("[\"*\"]"), &got)) assert.Equal(t, TopicFilter{{wildcard: &star}}, got) sixtyfour := xdr.Uint64(64) scval := xdr.ScVal{Type: xdr.ScValTypeScvU64, U64: &sixtyfour} scvalstr, err := xdr.MarshalBase64(scval) - assert.NoError(t, err) - assert.NoError(t, json.Unmarshal([]byte(fmt.Sprintf("[%q]", scvalstr)), &got)) + require.NoError(t, err) + require.NoError(t, json.Unmarshal([]byte(fmt.Sprintf("[%q]", scvalstr)), &got)) assert.Equal(t, TopicFilter{{scval: &scval}}, got) } @@ -409,38 +409,38 @@ func topicFilterToString(t TopicFilter) string { func TestGetEventsRequestValid(t *testing.T) { // omit startLedger but include cursor var request GetEventsRequest - assert.NoError(t, 
json.Unmarshal( + require.NoError(t, json.Unmarshal( []byte("{ \"filters\": [], \"pagination\": { \"cursor\": \"0000000021474840576-0000000000\"} }"), &request, )) assert.Equal(t, uint32(0), request.StartLedger) - assert.NoError(t, request.Valid(1000)) + require.NoError(t, request.Valid(1000)) - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{}, Pagination: &PaginationOptions{Cursor: &db.Cursor{}}, }).Valid(1000), "ledger ranges and cursor cannot both be set") - assert.NoError(t, (&GetEventsRequest{ + require.NoError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{}, Pagination: nil, }).Valid(1000)) - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{}, Pagination: &PaginationOptions{Limit: 1001}, }).Valid(1000), "limit must not exceed 1000") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 0, Filters: []EventFilter{}, Pagination: nil, }).Valid(1000), "startLedger must be positive") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {}, {}, {}, {}, {}, {}, @@ -448,7 +448,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "maximum 5 filters per request") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {EventType: map[string]interface{}{"foo": nil}}, @@ -456,7 +456,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "filter 1 invalid: filter type invalid: if set, type must be either 'system', 'contract' or 'diagnostic'") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {ContractIDs: []string{ @@ -471,7 +471,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "filter 1 invalid: maximum 5 contract IDs per filter") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {ContractIDs: []string{"a"}}, @@ -479,7 +479,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "filter 1 invalid: contract ID 1 invalid") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {ContractIDs: []string{"CCVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVKVINVALID"}}, @@ -487,7 +487,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "filter 1 invalid: contract ID 1 invalid") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ { @@ -499,7 +499,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "filter 1 invalid: maximum 5 topics per filter") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {Topics: []TopicFilter{ @@ -509,7 +509,7 @@ func TestGetEventsRequestValid(t *testing.T) { Pagination: nil, }).Valid(1000), "filter 1 invalid: topic 1 invalid: topic must have at least one segment") - assert.EqualError(t, (&GetEventsRequest{ + require.EqualError(t, (&GetEventsRequest{ StartLedger: 1, Filters: []EventFilter{ {Topics: []TopicFilter{ @@ -531,7 +531,7 @@ func TestGetEvents(t *testing.T) { 
counter := xdr.ScSymbol("COUNTER") counterScVal := xdr.ScVal{Type: xdr.ScValTypeScvSymbol, Sym: &counter} counterXdr, err := xdr.MarshalBase64(counterScVal) - assert.NoError(t, err) + require.NoError(t, err) t.Run("startLedger validation", func(t *testing.T) { contractID := xdr.Hash([32]byte{}) @@ -563,7 +563,7 @@ func TestGetEvents(t *testing.T) { ledgerCloseMeta := ledgerCloseMetaWithEvents(2, now.Unix(), txMeta...) require.NoError(t, ledgerW.InsertLedger(ledgerCloseMeta), "ingestion failed for ledger ") - assert.NoError(t, eventW.InsertEvents(ledgerCloseMeta)) + require.NoError(t, eventW.InsertEvents(ledgerCloseMeta)) require.NoError(t, write.Commit(ledgerCloseMeta)) handler := eventsRPCHandler{ @@ -616,7 +616,7 @@ func TestGetEvents(t *testing.T) { ledgerCloseMeta := ledgerCloseMetaWithEvents(1, now.Unix(), txMeta...) require.NoError(t, ledgerW.InsertLedger(ledgerCloseMeta), "ingestion failed for ledger ") - assert.NoError(t, eventW.InsertEvents(ledgerCloseMeta)) + require.NoError(t, eventW.InsertEvents(ledgerCloseMeta)) require.NoError(t, write.Commit(ledgerCloseMeta)) handler := eventsRPCHandler{ @@ -628,7 +628,7 @@ func TestGetEvents(t *testing.T) { results, err := handler.getEvents(context.TODO(), GetEventsRequest{ StartLedger: 1, }) - assert.NoError(t, err) + require.NoError(t, err) var expected []EventInfo for i := range txMeta { @@ -642,7 +642,7 @@ func TestGetEvents(t *testing.T) { Type: xdr.ScValTypeScvSymbol, Sym: &counter, }) - assert.NoError(t, err) + require.NoError(t, err) expected = append(expected, EventInfo{ EventType: EventTypeContract, Ledger: 1, @@ -678,7 +678,7 @@ func TestGetEvents(t *testing.T) { xdr.Hash([32]byte{}), xdr.Hash([32]byte{1}), } - for i := 0; i < 5; i++ { + for i := range 5 { txMeta = append(txMeta, transactionMetaWithEvents( contractEvent( contractIDs[i%len(contractIDs)], @@ -711,7 +711,7 @@ func TestGetEvents(t *testing.T) { {ContractIDs: []string{strkey.MustEncode(strkey.VersionByteContract, contractIDs[0][:])}}, }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, uint32(1), results.LatestLedger) expectedIds := []string{ @@ -779,16 +779,16 @@ func TestGetEvents(t *testing.T) { }}, }, }) - assert.NoError(t, err) + require.NoError(t, err) id := db.Cursor{Ledger: 1, Tx: 5, Op: 0, Event: 0}.String() - assert.NoError(t, err) + require.NoError(t, err) scVal := xdr.ScVal{ Type: xdr.ScValTypeScvU64, U64: &number, } value, err := xdr.MarshalBase64(scVal) - assert.NoError(t, err) + require.NoError(t, err) expected := []EventInfo{ { EventType: EventTypeContract, @@ -928,14 +928,14 @@ func TestGetEvents(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) id := db.Cursor{Ledger: 1, Tx: 4, Op: 0, Event: 0}.String() value, err := xdr.MarshalBase64(xdr.ScVal{ Type: xdr.ScValTypeScvU64, U64: &number, }) - assert.NoError(t, err) + require.NoError(t, err) expected := []EventInfo{ { EventType: EventTypeContract, @@ -1010,7 +1010,7 @@ func TestGetEvents(t *testing.T) { {EventType: map[string]interface{}{EventTypeSystem: nil}}, }, }) - assert.NoError(t, err) + require.NoError(t, err) id := db.Cursor{Ledger: 1, Tx: 1, Op: 0, Event: 1}.String() expected := []EventInfo{ @@ -1046,7 +1046,7 @@ func TestGetEvents(t *testing.T) { contractID := xdr.Hash([32]byte{}) var txMeta []xdr.TransactionMeta - for i := 0; i < 180; i++ { + for i := range 180 { number := xdr.Uint64(i) txMeta = append(txMeta, transactionMetaWithEvents( contractEvent( @@ -1074,10 +1074,10 @@ func TestGetEvents(t *testing.T) { Filters: []EventFilter{}, Pagination: 
&PaginationOptions{Limit: 10}, }) - assert.NoError(t, err) + require.NoError(t, err) var expected []EventInfo - for i := 0; i < 10; i++ { + for i := range 10 { id := db.Cursor{ Ledger: 1, Tx: uint32(i + 1), @@ -1085,7 +1085,7 @@ func TestGetEvents(t *testing.T) { Event: 0, }.String() value, err := xdr.MarshalBase64(txMeta[i].MustV3().SorobanMeta.Events[0].Body.MustV0().Data) - assert.NoError(t, err) + require.NoError(t, err) expected = append(expected, EventInfo{ EventType: EventTypeContract, Ledger: 1, @@ -1177,7 +1177,7 @@ func TestGetEvents(t *testing.T) { Limit: 2, }, }) - assert.NoError(t, err) + require.NoError(t, err) var expected []EventInfo expectedIDs := []string{ @@ -1187,7 +1187,7 @@ func TestGetEvents(t *testing.T) { symbols := datas[1:3] for i, id := range expectedIDs { expectedXdr, err := xdr.MarshalBase64(xdr.ScVal{Type: xdr.ScValTypeScvSymbol, Sym: &symbols[i]}) - assert.NoError(t, err) + require.NoError(t, err) expected = append(expected, EventInfo{ EventType: EventTypeContract, Ledger: 5, @@ -1210,7 +1210,7 @@ func TestGetEvents(t *testing.T) { Limit: 2, }, }) - assert.NoError(t, err) + require.NoError(t, err) latestLedger := 5 endLedger := min(5+LedgerScanLimit, latestLedger+1) @@ -1444,7 +1444,7 @@ func newTestDB(tb testing.TB) *db.DB { db, err := db.OpenSQLiteDB(dbPath) require.NoError(tb, err) tb.Cleanup(func() { - assert.NoError(tb, db.Close()) + require.NoError(tb, db.Close()) }) return db } diff --git a/cmd/soroban-rpc/internal/methods/get_ledger_entry.go b/cmd/soroban-rpc/internal/methods/get_ledger_entry.go index 7e87217a..24186c31 100644 --- a/cmd/soroban-rpc/internal/methods/get_ledger_entry.go +++ b/cmd/soroban-rpc/internal/methods/get_ledger_entry.go @@ -14,14 +14,14 @@ import ( "github.com/stellar/soroban-rpc/cmd/soroban-rpc/internal/xdr2json" ) -// Deprecated. Use GetLedgerEntriesRequest instead. +// GetLedgerEntryRequest Deprecated: Use GetLedgerEntriesRequest instead. // TODO(https://github.com/stellar/soroban-tools/issues/374) remove after getLedgerEntries is deployed. type GetLedgerEntryRequest struct { Key string `json:"key"` Format string `json:"xdrFormat"` } -// Deprecated. Use GetLedgerEntriesResponse instead. +// GetLedgerEntryResponse Deprecated: Use GetLedgerEntriesResponse instead. // TODO(https://github.com/stellar/soroban-tools/issues/374) remove after getLedgerEntries is deployed. type GetLedgerEntryResponse struct { EntryXDR string `json:"xdr"` @@ -35,7 +35,7 @@ type GetLedgerEntryResponse struct { } // NewGetLedgerEntryHandler returns a json rpc handler to retrieve the specified ledger entry from stellar core -// Deprecated. use NewGetLedgerEntriesHandler instead. +// Deprecated: use NewGetLedgerEntriesHandler instead // TODO(https://github.com/stellar/soroban-tools/issues/374) remove after getLedgerEntries is deployed. 
func NewGetLedgerEntryHandler(logger *log.Entry, ledgerEntryReader db.LedgerEntryReader) jrpc2.Handler { return NewHandler(func(ctx context.Context, request GetLedgerEntryRequest) (GetLedgerEntryResponse, error) { diff --git a/cmd/soroban-rpc/internal/methods/get_transactions_test.go b/cmd/soroban-rpc/internal/methods/get_transactions_test.go index 86304147..5a90202f 100644 --- a/cmd/soroban-rpc/internal/methods/get_transactions_test.go +++ b/cmd/soroban-rpc/internal/methods/get_transactions_test.go @@ -69,7 +69,7 @@ func TestGetTransactions_DefaultLimit(t *testing.T) { assert.Equal(t, toid.New(5, 2, 1).String(), response.Cursor) // assert transactions result - assert.Equal(t, 10, len(response.Transactions)) + assert.Len(t, response.Transactions, 10) } func TestGetTransactions_DefaultLimitExceedsLatestLedger(t *testing.T) { @@ -179,7 +179,7 @@ func TestGetTransactions_CustomLimitAndCursor(t *testing.T) { assert.Equal(t, toid.New(3, 1, 1).String(), response.Cursor) // assert transactions result - assert.Equal(t, 3, len(response.Transactions)) + assert.Len(t, response.Transactions, 3) assert.Equal(t, uint32(2), response.Transactions[0].Ledger) assert.Equal(t, uint32(2), response.Transactions[1].Ledger) assert.Equal(t, uint32(3), response.Transactions[2].Ledger) diff --git a/cmd/soroban-rpc/internal/methods/handler_test.go b/cmd/soroban-rpc/internal/methods/handler_test.go index 564f7f51..aba88d4e 100644 --- a/cmd/soroban-rpc/internal/methods/handler_test.go +++ b/cmd/soroban-rpc/internal/methods/handler_test.go @@ -7,6 +7,7 @@ import ( "github.com/creachadair/jrpc2" "github.com/creachadair/jrpc2/handler" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type Request struct { @@ -15,7 +16,7 @@ type Request struct { func TestNewHandlerNoArrayParameters(t *testing.T) { callCount := 0 - f := func(ctx context.Context, request Request) error { + f := func(_ context.Context, request Request) error { callCount++ assert.Equal(t, "bar", request.Parameter) return nil @@ -27,15 +28,15 @@ func TestNewHandlerNoArrayParameters(t *testing.T) { "params": { "parameter": "bar" } }` requests, err := jrpc2.ParseRequests([]byte(objectRequest)) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, requests, 1) finalObjectRequest := requests[0].ToRequest() // object parameters should work with our handlers customHandler := NewHandler(f) _, err = customHandler(context.Background(), finalObjectRequest) - assert.NoError(t, err) - assert.Equal(t, 1, callCount) + require.NoError(t, err) + require.Equal(t, 1, callCount) arrayRequest := `{ "jsonrpc": "2.0", @@ -44,17 +45,17 @@ func TestNewHandlerNoArrayParameters(t *testing.T) { "params": ["bar"] }` requests, err = jrpc2.ParseRequests([]byte(arrayRequest)) - assert.NoError(t, err) - assert.Len(t, requests, 1) + require.NoError(t, err) + require.Len(t, requests, 1) finalArrayRequest := requests[0].ToRequest() // Array requests should work with the normal handler, but not with our handlers stdHandler := handler.New(f) _, err = stdHandler(context.Background(), finalArrayRequest) - assert.NoError(t, err) - assert.Equal(t, 2, callCount) + require.NoError(t, err) + require.Equal(t, 2, callCount) _, err = customHandler(context.Background(), finalArrayRequest) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid parameters") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid parameters") } diff --git a/cmd/soroban-rpc/internal/methods/simulate_transaction.go 
diff --git a/cmd/soroban-rpc/internal/methods/simulate_transaction.go b/cmd/soroban-rpc/internal/methods/simulate_transaction.go
index 8d0a5fe4..b4a332ce 100644
--- a/cmd/soroban-rpc/internal/methods/simulate_transaction.go
+++ b/cmd/soroban-rpc/internal/methods/simulate_transaction.go
@@ -30,7 +30,8 @@ type SimulateTransactionCost struct {
 	MemoryBytes uint64 `json:"memBytes,string"`
 }
 
-// SimulateHostFunctionResult contains the simulation result of each HostFunction within the single InvokeHostFunctionOp allowed in a Transaction
+// SimulateHostFunctionResult contains the simulation result of each HostFunction
+// within the single InvokeHostFunctionOp allowed in a Transaction
 type SimulateHostFunctionResult struct {
 	AuthXDR  []string          `json:"auth,omitempty"`
 	AuthJSON []json.RawMessage `json:"authJson,omitempty"`
@@ -213,12 +214,16 @@ type SimulateTransactionResponse struct {
 	EventsXDR  []string          `json:"events,omitempty"` // DiagnosticEvent XDR in base64
 	EventsJSON []json.RawMessage `json:"eventsJson,omitempty"`
 
-	MinResourceFee  int64                        `json:"minResourceFee,string,omitempty"`
-	Results         []SimulateHostFunctionResult `json:"results,omitempty"`         // an array of the individual host function call results
-	Cost            SimulateTransactionCost      `json:"cost,omitempty"`            // the effective cpu and memory cost of the invoked transaction execution.
-	RestorePreamble *RestorePreamble             `json:"restorePreamble,omitempty"` // If present, it indicates that a prior RestoreFootprint is required
-	StateChanges    []LedgerEntryChange          `json:"stateChanges,omitempty"`    // If present, it indicates how the state (ledger entries) will change as a result of the transaction execution.
-	LatestLedger    uint32                       `json:"latestLedger"`
+	MinResourceFee int64 `json:"minResourceFee,string,omitempty"`
+	// an array of the individual host function call results
+	Results []SimulateHostFunctionResult `json:"results,omitempty"`
+	// the effective cpu and memory cost of the invoked transaction execution.
+	Cost SimulateTransactionCost `json:"cost,omitempty"`
+	// If present, it indicates that a prior RestoreFootprint is required
+	RestorePreamble *RestorePreamble `json:"restorePreamble,omitempty"`
+	// If present, it indicates how the state (ledger entries) will change as a result of the transaction execution.
+	StateChanges []LedgerEntryChange `json:"stateChanges,omitempty"`
+	LatestLedger uint32 `json:"latestLedger"`
 }
 
 type PreflightGetter interface {
@@ -260,7 +265,8 @@ func NewSimulateTransactionHandler(logger *log.Entry, ledgerEntryReader db.Ledge
 		case xdr.OperationTypeExtendFootprintTtl, xdr.OperationTypeRestoreFootprint:
 			if txEnvelope.Type != xdr.EnvelopeTypeEnvelopeTypeTx && txEnvelope.V1.Tx.Ext.V != 1 {
 				return SimulateTransactionResponse{
-					Error: "To perform a SimulateTransaction for ExtendFootprintTtl or RestoreFootprint operations, SorobanTransactionData must be provided",
+					Error: "To perform a SimulateTransaction for ExtendFootprintTtl or RestoreFootprint operations," +
+						" SorobanTransactionData must be provided",
 				}
 			}
 			footprint = txEnvelope.V1.Tx.Ext.SorobanData.Resources.Footprint
@@ -374,7 +380,7 @@ func NewSimulateTransactionHandler(logger *log.Entry, ledgerEntryReader db.Ledge
 		}
 
 		stateChanges := make([]LedgerEntryChange, len(result.LedgerEntryDiff))
-		for i := 0; i < len(stateChanges); i++ {
+		for i := range stateChanges {
 			if err := stateChanges[i].FromXDRDiff(result.LedgerEntryDiff[i], request.Format); err != nil {
 				return SimulateTransactionResponse{
 					Error: err.Error(),
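The SimulateTransactionResponse fields reflowed above keep their encoding/json struct tags. A trimmed-down sketch of what those tag options do (exampleResponse is a hypothetical stand-in, not the real type): `,string` marshals the int64 as a quoted JSON string so large fee values survive JavaScript clients without precision loss, and `omitempty` drops unset optional fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical, trimmed-down illustration of the tags used above.
type exampleResponse struct {
	MinResourceFee int64  `json:"minResourceFee,string,omitempty"`
	Error          string `json:"error,omitempty"`
	LatestLedger   uint32 `json:"latestLedger"`
}

func main() {
	out, _ := json.Marshal(exampleResponse{MinResourceFee: 1234567890123, LatestLedger: 5})
	// Error is empty and omitted; the int64 is emitted as a quoted string.
	fmt.Println(string(out)) // {"minResourceFee":"1234567890123","latestLedger":5}
}
```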
diff --git a/cmd/soroban-rpc/internal/methods/util_test.go b/cmd/soroban-rpc/internal/methods/util_test.go
index b183a318..399224e4 100644
--- a/cmd/soroban-rpc/internal/methods/util_test.go
+++ b/cmd/soroban-rpc/internal/methods/util_test.go
@@ -85,10 +85,10 @@ func createMockLedgerCloseMeta(ledgerSequence uint32) xdr.LedgerCloseMeta {
 func NewTestDB(tb testing.TB) *db.DB {
 	tmp := tb.TempDir()
 	dbPath := path.Join(tmp, "db.sqlite")
-	db, err := db.OpenSQLiteDB(dbPath)
+	dbConn, err := db.OpenSQLiteDB(dbPath)
 	require.NoError(tb, err)
 	tb.Cleanup(func() {
-		require.NoError(tb, db.Close())
+		require.NoError(tb, dbConn.Close())
 	})
-	return db
+	return dbConn
 }
diff --git a/cmd/soroban-rpc/internal/preflight/pool.go b/cmd/soroban-rpc/internal/preflight/pool.go
index 71ad82b4..ec59f615 100644
--- a/cmd/soroban-rpc/internal/preflight/pool.go
+++ b/cmd/soroban-rpc/internal/preflight/pool.go
@@ -16,13 +16,17 @@ import (
 	"github.com/stellar/soroban-rpc/cmd/soroban-rpc/internal/db"
 )
 
+const (
+	dbMetricsDurationConversionValue = 1000.0
+)
+
 type workerResult struct {
 	preflight Preflight
 	err       error
 }
 
 type workerRequest struct {
-	ctx        context.Context
+	ctx        context.Context //nolint:containedctx
 	params     Parameters
 	resultChan chan<- workerResult
 }
@@ -84,14 +88,14 @@ func NewPreflightWorkerPool(cfg WorkerPoolConfig) *WorkerPool {
 		Subsystem: "preflight_pool",
 		Name:      "request_ledger_get_duration_seconds",
 		Help:      "preflight request duration broken down by status",
-		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd
 	}, []string{"status", "type"})
 	preflightWP.ledgerEntriesFetchedMetric = prometheus.NewSummary(prometheus.SummaryOpts{
 		Namespace: cfg.Daemon.MetricsNamespace(),
 		Subsystem: "preflight_pool",
 		Name:      "request_ledger_entries_fetched",
 		Help:      "ledger entries fetched by simulate transaction calls",
-		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, //nolint:mnd
 	})
 	cfg.Daemon.MetricsRegistry().MustRegister(
 		requestQueueMetric,
@@ -180,7 +184,7 @@ func (pwp *WorkerPool) GetPreflight(ctx context.Context, params GetterParameters
 		}
 		pwp.durationMetric.With(
 			prometheus.Labels{"type": "db", "status": status},
-		).Observe(float64(wrappedTx.totalDurationMs) / 1000.0)
+		).Observe(float64(wrappedTx.totalDurationMs) / dbMetricsDurationConversionValue)
 	}
 	pwp.ledgerEntriesFetchedMetric.Observe(float64(wrappedTx.ledgerEntriesFetched))
 	return result.preflight, result.err
diff --git a/cmd/soroban-rpc/internal/preflight/preflight.go b/cmd/soroban-rpc/internal/preflight/preflight.go
index 2c4485d8..fe2cc3a5 100644
--- a/cmd/soroban-rpc/internal/preflight/preflight.go
+++ b/cmd/soroban-rpc/internal/preflight/preflight.go
@@ -46,7 +46,10 @@ const (
 //
 //export SnapshotSourceGet
 func SnapshotSourceGet(handle C.uintptr_t, cLedgerKey C.xdr_t) C.xdr_t {
-	h := cgo.Handle(handle).Value().(snapshotSourceHandle)
+	h, ok := cgo.Handle(handle).Value().(snapshotSourceHandle)
+	if !ok {
+		panic("invalid handle type: expected snapshotSourceHandle")
+	}
 	ledgerKeyXDR := GoXDR(cLedgerKey)
 	var ledgerKey xdr.LedgerKey
 	if err := xdr.SafeUnmarshal(ledgerKeyXDR, &ledgerKey); err != nil {
diff --git a/cmd/soroban-rpc/internal/preflight/preflight_test.go b/cmd/soroban-rpc/internal/preflight/preflight_test.go
index 0de5417e..c1bd72a7 100644
--- a/cmd/soroban-rpc/internal/preflight/preflight_test.go
+++ b/cmd/soroban-rpc/internal/preflight/preflight_test.go
@@ -26,7 +26,7 @@ var (
 var contractCostParams = func() *xdr.ContractCostParams {
 	var result xdr.ContractCostParams
 
-	for i := 0; i < 23; i++ {
+	for i := range 23 {
 		result = append(result, xdr.ContractCostParamEntry{
 			Ext:       xdr.ExtensionPoint{},
 			ConstTerm: xdr.Int64((i + 1) * 10),
@@ -476,6 +476,10 @@ func benchmark(b *testing.B, config benchmarkConfig) {
 func BenchmarkGetPreflight(b *testing.B) {
 	b.Run("In-memory storage", func(b *testing.B) { benchmark(b, benchmarkConfig{}) })
 	b.Run("DB storage", func(b *testing.B) { benchmark(b, benchmarkConfig{useDB: &benchmarkDBConfig{}}) })
-	b.Run("DB storage, restarting", func(b *testing.B) { benchmark(b, benchmarkConfig{useDB: &benchmarkDBConfig{restart: true}}) })
-	b.Run("DB storage, no cache", func(b *testing.B) { benchmark(b, benchmarkConfig{useDB: &benchmarkDBConfig{disableCache: true}}) })
+	b.Run("DB storage, restarting", func(b *testing.B) {
+		benchmark(b, benchmarkConfig{useDB: &benchmarkDBConfig{restart: true}})
+	})
+	b.Run("DB storage, no cache", func(b *testing.B) {
+		benchmark(b, benchmarkConfig{useDB: &benchmarkDBConfig{disableCache: true}})
+	})
 }
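The preflight.go hunk above swaps a bare type assertion for the two-value ("comma ok") form, so a mismatched handle fails with an explicit message instead of a generic runtime panic. A standalone sketch of that pattern with no cgo involved (snapshotSource and mustSnapshotSource are made-up names for illustration):

```go
package main

import "fmt"

type snapshotSource struct{ name string }

// mustSnapshotSource shows the comma-ok assertion: v.(T) alone panics with a
// generic runtime error on mismatch, while the two-value form lets the caller
// detect the mismatch and fail with a message of its own choosing.
func mustSnapshotSource(v any) snapshotSource {
	h, ok := v.(snapshotSource)
	if !ok {
		panic(fmt.Sprintf("invalid handle type %T: expected snapshotSource", v))
	}
	return h
}

func main() {
	fmt.Println(mustSnapshotSource(snapshotSource{name: "db"}).name) // db

	defer func() { fmt.Println("recovered:", recover()) }()
	mustSnapshotSource("not a snapshot source") // triggers the explicit panic above
}
```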
diff --git a/go.mod b/go.mod
index c604919d..17b93fe7 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ toolchain go1.22.1
 
 require (
 	github.com/Masterminds/squirrel v1.5.4
-	github.com/cenkalti/backoff/v4 v4.2.1
+	github.com/cenkalti/backoff/v4 v4.3.0
 	github.com/creachadair/jrpc2 v1.2.0
 	github.com/go-chi/chi v4.1.2+incompatible
 	github.com/mattn/go-sqlite3 v1.14.17
@@ -18,17 +18,17 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.7.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stellar/go v0.0.0-20240628063057-b589529f102f
+	github.com/stellar/go v0.0.0-20240729151841-8b1dba46985c
 	github.com/stretchr/testify v1.9.0
 )
 
 require (
-	cloud.google.com/go v0.112.2 // indirect
-	cloud.google.com/go/auth v0.3.0 // indirect
+	cloud.google.com/go v0.114.0 // indirect
+	cloud.google.com/go/auth v0.5.1 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
 	cloud.google.com/go/compute/metadata v0.3.0 // indirect
 	cloud.google.com/go/iam v1.1.8 // indirect
-	cloud.google.com/go/storage v1.40.0 // indirect
+	cloud.google.com/go/storage v1.42.0 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -42,14 +42,14 @@ require (
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-errors/errors v1.5.1 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
 	github.com/hashicorp/golang-lru v1.0.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -63,7 +63,7 @@ require (
 	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pkg/errors v0.9.1
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.5.0
 	github.com/prometheus/common v0.45.0 // indirect
@@ -82,26 +82,26 @@ require (
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
-	go.opentelemetry.io/otel v1.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.24.0 // indirect
-	go.opentelemetry.io/otel/trace v1.24.0 // indirect
+	go.opentelemetry.io/otel v1.28.0 // indirect
+	go.opentelemetry.io/otel/metric v1.28.0 // indirect
+	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.22.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
-	golang.org/x/mod v0.13.0 // indirect
-	golang.org/x/net v0.24.0 // indirect
-	golang.org/x/oauth2 v0.20.0 // indirect
+	golang.org/x/mod v0.17.0 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
 	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/sys v0.19.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.14.0 // indirect
-	google.golang.org/api v0.177.0 // indirect
-	google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
-	google.golang.org/grpc v1.63.2 // indirect
-	google.golang.org/protobuf v1.34.1 // indirect
+	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
+	google.golang.org/api v0.183.0 // indirect
+	google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+	google.golang.org/grpc v1.64.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/djherbis/atime.v1 v1.0.0 // indirect
gopkg.in/djherbis/stream.v1 v1.3.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index ac3ba4e5..b36601aa 100644 --- a/go.sum +++ b/go.sum @@ -17,10 +17,10 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= -cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= -cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs= -cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -35,20 +35,22 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.37.0 h1:0uEEfaB1VIJzabPpwpZf44zWAKAme3zwKKxHk7vJQxQ= -cloud.google.com/go/pubsub v1.37.0/go.mod h1:YQOQr1uiUM092EXwKs56OPT650nwnawc+8/IjoUeGzQ= +cloud.google.com/go/pubsub v1.38.0 h1:J1OT7h51ifATIedjqk/uBNPh+1hkvUaH4VKbz4UuAsc= +cloud.google.com/go/pubsub v1.38.0/go.mod h1:IPMJSWSus/cu57UyR01Jqa/bNOQA+XnPF6Z4dKW4fAA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU= +cloud.google.com/go/storage 
v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= @@ -68,8 +70,8 @@ github.com/aws/aws-sdk-go v1.45.27 h1:b+zOTPkAG4i2RvqPdHxkJZafmhhVaVHBp4r41Tu4I6 github.com/aws/aws-sdk-go v1.45.27/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -107,8 +109,8 @@ github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0X github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fsouza/fake-gcs-server v1.49.0 h1:4x1RxKuqoqhZrXogtj5nInQnIjQylxld43tKrkPHnmE= -github.com/fsouza/fake-gcs-server v1.49.0/go.mod h1:FJYZxdHQk2nGxrczFjLbDv8h6SnYXxSxcnM14eeespA= +github.com/fsouza/fake-gcs-server v1.49.2 h1:fukDqzEQM50QkA0jAbl6cLqeDu3maQjwZBuys759TR4= +github.com/fsouza/fake-gcs-server v1.49.2/go.mod h1:17SYzJEXRcaAA5ATwwvgBkSIqIy7r1icnGM0y/y4foY= github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4= github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= @@ -121,8 +123,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= @@ -182,8 +184,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -207,8 +209,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= @@ -340,10 +342,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= -github.com/stellar/go v0.0.0-20240617183518-100dc4fa6043 h1:5UQzsvt9VtD3ijpzPtdW0/lXWCNgDs6GzmLUE8ZuWfk= -github.com/stellar/go v0.0.0-20240617183518-100dc4fa6043/go.mod h1:TuXKLL7WViqwrvpWno2I4UYGn2Ny9KZld1jUIN6fnK8= -github.com/stellar/go v0.0.0-20240628063057-b589529f102f h1:3W9JZJ0r87wy2M3wsACuJtKW/cNWXpfw5Jwyt89Am30= -github.com/stellar/go v0.0.0-20240628063057-b589529f102f/go.mod h1:4cVjIVyU8V1iSBEMGd41j22DAyBoz2SVL5TcrJPqePU= +github.com/stellar/go v0.0.0-20240729151841-8b1dba46985c h1:8+XiaqgCaFWUjMKpQ37OJzY8LZ6ie8BpLxq/wNQZ2eM= +github.com/stellar/go v0.0.0-20240729151841-8b1dba46985c/go.mod h1:rrFK7a8i2h9xad9HTfnSN/dTNEqXVHKAbkFeR7UxAgs= github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2 h1:OzCVd0SV5qE3ZcDeSFCmOWLZfEWZ3Oe8KtmSOYKEVWE= github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2/go.mod h1:yoxyU/M8nl9LKeWIoBrbDPQ7Cy+4jxRcWcOayZ4BMps= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -369,12 +369,12 @@ github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGG github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= github.com/xdrpp/goxdr v0.1.1 
h1:E1B2c6E8eYhOVyd7yEpOyopzTPirUeF6mVOfXfGyJyc= github.com/xdrpp/goxdr v0.1.1/go.mod h1:dXo1scL/l6s7iME1gxHWo2XCppbHEKZS7m/KyYWkNzA= -github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 h1:KM4T3G70MiR+JtqplcYkNVoNz7pDwYaBxWBXQK804So= -github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c h1:XZWnr3bsDQWAZg4Ne+cPoXRPILrNlPNQfxBuwLl43is= -github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce h1:cVSRGH8cOveJNwFEEZLXtB+XMnRqKLjUP6V/ZFYQCXI= -github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb h1:06WAhQa+mYv7BiOk13B/ywyTlkoE/S7uu6TBKU6FHnE= github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d h1:yJIizrfO599ot2kQ6Af1enICnwBD3XoxgX3MrMwot2M= @@ -398,14 +398,14 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod 
h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -416,8 +416,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -454,8 +454,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -491,8 +491,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -502,8 +502,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -557,13 +557,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -573,8 +573,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -628,8 +628,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -655,8 +655,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= -google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -700,12 +700,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 h1:DTJM0R8LECCgFeUwApvcEJHz85HLagW8uRENYxHh1ww= -google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6/go.mod h1:10yRODfgim2/T8csjQsMPgZOMvtytXKTDRzH6HRGzRw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 
h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -722,8 +722,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -734,8 +734,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=