diff --git a/.github/workflows/horizon.yml b/.github/workflows/horizon.yml index 677a60b835..bf9cae7246 100644 --- a/.github/workflows/horizon.yml +++ b/.github/workflows/horizon.yml @@ -162,6 +162,7 @@ jobs: ledger-exporter: name: Test and push the Ledger Exporter images runs-on: ubuntu-latest + if: false # Disable the job steps: - uses: actions/checkout@v3 with: diff --git a/Makefile b/Makefile index abacb05dd8..13486ea7d7 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,8 @@ xdr/Stellar-contract.x \ xdr/Stellar-internal.x \ xdr/Stellar-contract-config-setting.x -XDRS = $(DOWNLOADABLE_XDRS) xdr/Stellar-lighthorizon.x - +XDRS = $(DOWNLOADABLE_XDRS) xdr/Stellar-lighthorizon.x \ + xdr/Stellar-exporter.x XDRGEN_COMMIT=e2cac557162d99b12ae73b846cf3d5bfe16636de diff --git a/exp/services/ledgerexporter/README.md b/exp/services/ledgerexporter/README.md new file mode 100644 index 0000000000..ad8a3ddeae --- /dev/null +++ b/exp/services/ledgerexporter/README.md @@ -0,0 +1,83 @@ +# Ledger Exporter (Work in Progress) + +The Ledger Exporter is a tool designed to export ledger data from a Stellar network and upload it to a specified destination. It supports both bounded and unbounded modes, allowing users to export a specific range of ledgers or continuously export new ledgers as they arrive on the network. + +Ledger Exporter currently uses captive-core as the ledger backend and GCS as the destination data store. + +# Exported Data Format +The tool allows for the export of multiple ledgers in a single exported file. The exported data is in XDR format and is compressed using gzip before being uploaded. + +```go +type LedgerCloseMetaBatch struct { + StartSequence uint32 + EndSequence uint32 + LedgerCloseMetas []LedgerCloseMeta +} +``` + +## Getting Started + +### Installation (coming soon) + +### Command Line Options + +#### Bounded Mode: +Exports a specific range of ledgers, defined by --start and --end. 
+```bash +ledgerexporter --start --end --config-file +``` + +#### Unbounded Mode: +Exports ledgers continuously starting from --start. In this mode, the end ledger is either not provided or set to 0. +```bash +ledgerexporter --start --config-file +``` + + +Starts exporting from a specified number of ledgers before the latest ledger sequence number on the network. +```bash +ledgerexporter --from-last --config-file +``` + +### Configuration (toml): + +```toml +network = "testnet" # Options: `testnet` or `pubnet` +destination_url = "gcs://your-bucket-name" + +[exporter_config] +ledgers_per_file = 64 +files_per_partition = 10 +``` + +#### Stellar-core configuration: +- The exporter automatically configures stellar-core based on the network specified in the config. +- Ensure you have stellar-core installed and accessible in your system's $PATH. + +### Exported Files + +#### File Organization: +- Ledgers are grouped into files, with the number of ledgers per file set by `ledgers_per_file`. +- Files are further organized into partitions, with the number of files per partition set by `files_per_partition`. + +### Filename Structure: +- Filenames indicate the ledger range they contain, e.g., `0-63.xdr.gz` holds ledgers 0 to 63. +- Partition directories group files, e.g., `/0-639/` holds files for ledgers 0 to 639. + +#### Example: +with `ledgers_per_file = 64` and `files_per_partition = 10`: +- Partition names: `/0-639`, `/640-1279`, ... +- Filenames: `/0-639/0-63.xdr.gz`, `/0-639/64-127.xdr.gz`, ... + +#### Special Cases: + +- If `ledgers_per_file` is set to 1, filenames will only contain the ledger number. +- If `files_per_partition` is set to 1, filenames will not contain the partition. + +#### Note: +- Avoid changing `ledgers_per_file` and `files_per_partition` after configuration for consistency. + +#### Retrieving Data: +- To locate a specific ledger sequence, calculate the partition name and ledger file name using `files_per_partition` and `ledgers_per_file`. 
+- The `GetObjectKeyFromSequenceNumber` function automates this calculation. + diff --git a/exp/services/ledgerexporter/config.toml b/exp/services/ledgerexporter/config.toml new file mode 100644 index 0000000000..30c4a4fafe --- /dev/null +++ b/exp/services/ledgerexporter/config.toml @@ -0,0 +1,7 @@ +network = "testnet" +destination_url = "gcs://exporter-test/ledgers" + +[exporter_config] + ledgers_per_file = 1 + files_per_partition = 64000 + diff --git a/exp/services/ledgerexporter/internal/app.go b/exp/services/ledgerexporter/internal/app.go new file mode 100644 index 0000000000..fb4a5f788b --- /dev/null +++ b/exp/services/ledgerexporter/internal/app.go @@ -0,0 +1,131 @@ +package ledgerexporter + +import ( + "context" + _ "embed" + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/pkg/errors" + "github.com/stellar/go/ingest/ledgerbackend" + _ "github.com/stellar/go/network" + "github.com/stellar/go/support/log" +) + +var ( + logger = log.New().WithField("service", "ledger-exporter") +) + +type App struct { + config Config + ledgerBackend ledgerbackend.LedgerBackend + dataStore DataStore + exportManager ExportManager + uploader Uploader +} + +func NewApp() *App { + logger.SetLevel(log.DebugLevel) + + config := Config{} + err := config.LoadConfig() + logFatalIf(err, "Could not load configuration") + + app := &App{config: config} + return app +} + +func (a *App) init(ctx context.Context) { + a.dataStore = mustNewDataStore(ctx, &a.config) + a.ledgerBackend = mustNewLedgerBackend(ctx, a.config) + a.exportManager = NewExportManager(a.config.ExporterConfig, a.ledgerBackend) + a.uploader = NewUploader(a.dataStore, a.exportManager.GetMetaArchiveChannel()) +} + +func (a *App) close() { + if err := a.dataStore.Close(); err != nil { + logger.WithError(err).Error("Error closing datastore") + } + if err := a.ledgerBackend.Close(); err != nil { + logger.WithError(err).Error("Error closing ledgerBackend") + } +} + +func (a *App) Run() { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + a.init(ctx) + defer a.close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + + err := a.uploader.Run(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + logger.WithError(err).Error("Error executing Uploader") + cancel() + } + }() + + go func() { + defer wg.Done() + + err := a.exportManager.Run(ctx, a.config.StartLedger, a.config.EndLedger) + if err != nil && !errors.Is(err, context.Canceled) { + logger.WithError(err).Error("Error executing ExportManager") + cancel() + } + }() + + // Handle OS signals to gracefully terminate the service + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigCh + logger.Infof("Received termination signal: %v", sig) + cancel() + }() + + wg.Wait() + logger.Info("Shutting down ledger-exporter") +} + +func mustNewDataStore(ctx context.Context, config *Config) DataStore { + dataStore, err := NewDataStore(ctx, fmt.Sprintf("%s/%s", config.DestinationURL, config.Network)) + logFatalIf(err, "Could not connect to destination data store") + return dataStore +} + +// mustNewLedgerBackend Creates and initializes captive core ledger backend +// Currently, only supports captive-core as ledger backend +func mustNewLedgerBackend(ctx context.Context, config Config) ledgerbackend.LedgerBackend { + captiveConfig := config.GenerateCaptiveCoreConfig() + + // Create a new captive core backend + backend, err := ledgerbackend.NewCaptive(captiveConfig) + logFatalIf(err, "Failed to create captive-core instance") + + var ledgerRange ledgerbackend.Range + if config.EndLedger == 0 { + ledgerRange = ledgerbackend.UnboundedRange(config.StartLedger) + } else { + ledgerRange = ledgerbackend.BoundedRange(config.StartLedger, config.EndLedger) + } + + err = backend.PrepareRange(ctx, ledgerRange) + logFatalIf(err, "Could not prepare captive core ledger backend") + return backend +} + +func 
logFatalIf(err error, message string, args ...interface{}) { + if err != nil { + logger.WithError(err).Fatalf(message, args...) + } +} diff --git a/exp/services/ledgerexporter/internal/config.go b/exp/services/ledgerexporter/internal/config.go new file mode 100644 index 0000000000..640841b1d9 --- /dev/null +++ b/exp/services/ledgerexporter/internal/config.go @@ -0,0 +1,192 @@ +package ledgerexporter + +import ( + _ "embed" + "flag" + "os/exec" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + + "github.com/pelletier/go-toml" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/ordered" +) + +const Pubnet = "pubnet" +const Testnet = "testnet" + +type StellarCoreConfig struct { + NetworkPassphrase string `toml:"network_passphrase"` + HistoryArchiveUrls []string `toml:"history_archive_urls"` + StellarCoreBinaryPath string `toml:"stellar_core_binary_path"` + CaptiveCoreTomlPath string `toml:"captive_core_toml_path"` +} + +type Config struct { + Network string `toml:"network"` + DestinationURL string `toml:"destination_url"` + ExporterConfig ExporterConfig `toml:"exporter_config"` + StellarCoreConfig StellarCoreConfig `toml:"stellar_core_config"` + + //From command-line + StartLedger uint32 `toml:"start"` + EndLedger uint32 `toml:"end"` + StartFromLastLedgers uint32 `toml:"from-last"` +} + +func (config *Config) LoadConfig() error { + // Parse command-line options + startLedger := flag.Uint("start", 0, "Starting ledger") + endLedger := flag.Uint("end", 0, "Ending ledger (inclusive)") + startFromLastNLedger := flag.Uint("from-last", 0, "Start streaming from last N ledgers") + + configFilePath := flag.String("config-file", "config.toml", "Path to the TOML config file") + flag.Parse() + + config.StartLedger = uint32(*startLedger) + config.EndLedger = uint32(*endLedger) + config.StartFromLastLedgers = uint32(*startFromLastNLedger) + + // Load config TOML file + cfg, err := 
toml.LoadFile(*configFilePath) + if err != nil { + return err + } + + // Unmarshal TOML data into the Config struct + err = cfg.Unmarshal(config) + logFatalIf(err, "Error unmarshalling TOML config.") + logger.Infof("Config: %v", *config) + + var historyArchiveUrls []string + switch config.Network { + case Pubnet: + historyArchiveUrls = network.PublicNetworkhistoryArchiveURLs + case Testnet: + historyArchiveUrls = network.TestNetworkhistoryArchiveURLs + default: + logger.Fatalf("Invalid network %s", config.Network) + } + + // Retrieve the latest ledger sequence from history archives + latestNetworkLedger, err := getLatestLedgerSequenceFromHistoryArchives(historyArchiveUrls) + logFatalIf(err, "Failed to retrieve the latest ledger sequence from history archives.") + + // Validate config params + err = config.validateAndSetLedgerRange(latestNetworkLedger) + logFatalIf(err, "Error validating config params.") + + // Validate and build the appropriate range + // TODO: Make it configurable + config.adjustLedgerRange() + + return nil +} + +func (config *Config) validateAndSetLedgerRange(latestNetworkLedger uint32) error { + if config.StartFromLastLedgers > 0 && (config.StartLedger > 0 || config.EndLedger > 0) { + return errors.New("--from-last cannot be used with --start or --end") + } + + if config.StartFromLastLedgers > 0 { + if config.StartFromLastLedgers > latestNetworkLedger { + return errors.Errorf("--from-last %d exceeds latest network ledger %d", + config.StartFromLastLedgers, latestNetworkLedger) + } + config.StartLedger = latestNetworkLedger - config.StartFromLastLedgers + logger.Infof("Setting start ledger to %d, calculated as latest ledger (%d) minus --from-last value (%d)", + config.StartLedger, latestNetworkLedger, config.StartFromLastLedgers) + } + + if config.StartLedger > latestNetworkLedger { + return errors.Errorf("--start %d exceeds latest network ledger %d", + config.StartLedger, latestNetworkLedger) + } + + // Ensure that the start ledger is at least 2. 
+ config.StartLedger = ordered.Max(2, config.StartLedger) + + if config.EndLedger != 0 { // Bounded mode + if config.EndLedger < config.StartLedger { + return errors.New("invalid --end value, must be >= --start") + } + if config.EndLedger > latestNetworkLedger { + return errors.Errorf("--end %d exceeds latest network ledger %d", + config.EndLedger, latestNetworkLedger) + } + } + + return nil +} + +func (config *Config) adjustLedgerRange() { + logger.Infof("Requested ledger range start=%d, end=%d", config.StartLedger, config.EndLedger) + + // Check if either the start or end ledger does not fall on the "LedgersPerFile" boundary + // and adjust the start and end ledger accordingly. + // Align the start ledger to the nearest "LedgersPerFile" boundary. + config.StartLedger = config.StartLedger / config.ExporterConfig.LedgersPerFile * config.ExporterConfig.LedgersPerFile + + // Ensure that the adjusted start ledger is at least 2. + config.StartLedger = ordered.Max(2, config.StartLedger) + + // Align the end ledger (for bounded cases) to the nearest "LedgersPerFile" boundary. + if config.EndLedger != 0 { + // Add an extra batch only if "LedgersPerFile" is greater than 1 and the end ledger doesn't fall on the boundary. 
+ if config.ExporterConfig.LedgersPerFile > 1 && config.EndLedger%config.ExporterConfig.LedgersPerFile != 0 { + config.EndLedger = (config.EndLedger/config.ExporterConfig.LedgersPerFile + 1) * config.ExporterConfig.LedgersPerFile + } + } + + logger.Infof("Adjusted ledger range: start=%d, end=%d", config.StartLedger, config.EndLedger) +} + +func (config *Config) GenerateCaptiveCoreConfig() ledgerbackend.CaptiveCoreConfig { + coreConfig := &config.StellarCoreConfig + + // Look for stellar-core binary in $PATH, if not supplied + if coreConfig.StellarCoreBinaryPath == "" { + var err error + coreConfig.StellarCoreBinaryPath, err = exec.LookPath("stellar-core") + logFatalIf(err, "Failed to find stellar-core binary") + } + + var captiveCoreConfig []byte + // Default network config + switch config.Network { + case Pubnet: + coreConfig.NetworkPassphrase = network.PublicNetworkPassphrase + coreConfig.HistoryArchiveUrls = network.PublicNetworkhistoryArchiveURLs + captiveCoreConfig = ledgerbackend.PubnetDefaultConfig + + case Testnet: + coreConfig.NetworkPassphrase = network.TestNetworkPassphrase + coreConfig.HistoryArchiveUrls = network.TestNetworkhistoryArchiveURLs + captiveCoreConfig = ledgerbackend.TestnetDefaultConfig + + default: + logger.Fatalf("Invalid network %s", config.Network) + } + + params := ledgerbackend.CaptiveCoreTomlParams{ + NetworkPassphrase: coreConfig.NetworkPassphrase, + HistoryArchiveURLs: coreConfig.HistoryArchiveUrls, + UseDB: true, + } + + captiveCoreToml, err := ledgerbackend.NewCaptiveCoreTomlFromData(captiveCoreConfig, params) + logFatalIf(err, "Failed to create captive-core toml") + + return ledgerbackend.CaptiveCoreConfig{ + BinaryPath: coreConfig.StellarCoreBinaryPath, + NetworkPassphrase: params.NetworkPassphrase, + HistoryArchiveURLs: params.HistoryArchiveURLs, + CheckpointFrequency: historyarchive.DefaultCheckpointFrequency, + Log: logger.WithField("subservice", "stellar-core"), + Toml: captiveCoreToml, + UserAgent: "ledger-exporter", + 
UseDB: true, + } +} diff --git a/exp/services/ledgerexporter/internal/config_test.go b/exp/services/ledgerexporter/internal/config_test.go new file mode 100644 index 0000000000..6a320c7424 --- /dev/null +++ b/exp/services/ledgerexporter/internal/config_test.go @@ -0,0 +1,170 @@ +package ledgerexporter + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidateStartAndEndLedger(t *testing.T) { + const latestNetworkLedger = 20000 + + config := &Config{ + ExporterConfig: ExporterConfig{ + LedgersPerFile: 1, + }, + } + tests := []struct { + name string + startLedger uint32 + endLedger uint32 + errMsg string + }{ + { + name: "End ledger same as latest ledger", + startLedger: 512, + endLedger: 512, + errMsg: "", + }, + { + name: "End ledger greater than start ledger", + startLedger: 512, + endLedger: 600, + errMsg: "", + }, + { + name: "No end ledger provided, unbounded mode", + startLedger: 512, + endLedger: 0, + errMsg: "", + }, + { + name: "End ledger before start ledger", + startLedger: 512, + endLedger: 2, + errMsg: "invalid --end value, must be >= --start", + }, + { + name: "End ledger exceeds latest ledger", + startLedger: 512, + endLedger: latestNetworkLedger + 1, + errMsg: fmt.Sprintf("--end %d exceeds latest network ledger %d", + latestNetworkLedger+1, latestNetworkLedger), + }, + { + name: "Start ledger 0", + startLedger: 0, + endLedger: 2, + errMsg: "", + }, + { + name: "Start ledger exceeds latest ledger", + startLedger: latestNetworkLedger + 1, + endLedger: 0, + errMsg: fmt.Sprintf("--start %d exceeds latest network ledger %d", + latestNetworkLedger+1, latestNetworkLedger), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config.StartLedger = tt.startLedger + config.EndLedger = tt.endLedger + if tt.errMsg != "" { + require.Equal(t, tt.errMsg, config.validateAndSetLedgerRange(latestNetworkLedger).Error()) + } else { + require.NoError(t, config.validateAndSetLedgerRange(latestNetworkLedger)) 
+ } + }) + } +} + +func TestAdjustLedgerRangeBoundedMode(t *testing.T) { + tests := []struct { + name string + config *Config + expected *Config + }{ + { + name: "Min start ledger 2", + config: &Config{StartLedger: 0, EndLedger: 10, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + expected: &Config{StartLedger: 2, EndLedger: 10, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + }, + { + name: "No change, 1 ledger per file", + config: &Config{StartLedger: 2, EndLedger: 2, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + expected: &Config{StartLedger: 2, EndLedger: 2, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + }, + { + name: "Min start ledger2, round up end ledger, 10 ledgers per file", + config: &Config{StartLedger: 0, EndLedger: 1, ExporterConfig: ExporterConfig{LedgersPerFile: 10}}, + expected: &Config{StartLedger: 2, EndLedger: 10, ExporterConfig: ExporterConfig{LedgersPerFile: 10}}, + }, + { + name: "Round down start ledger and round up end ledger, 15 ledgers per file ", + config: &Config{StartLedger: 4, EndLedger: 10, ExporterConfig: ExporterConfig{LedgersPerFile: 15}}, + expected: &Config{StartLedger: 2, EndLedger: 15, ExporterConfig: ExporterConfig{LedgersPerFile: 15}}, + }, + { + name: "Round down start ledger and round up end ledger, 64 ledgers per file ", + config: &Config{StartLedger: 400, EndLedger: 500, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + expected: &Config{StartLedger: 384, EndLedger: 512, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + }, + { + name: "No change, 64 ledger per file", + config: &Config{StartLedger: 64, EndLedger: 128, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + expected: &Config{StartLedger: 64, EndLedger: 128, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.config.adjustLedgerRange() + require.EqualValues(t, tt.expected.StartLedger, tt.config.StartLedger) + require.EqualValues(t, 
tt.expected.EndLedger, tt.config.EndLedger) + }) + } +} + +func TestAdjustLedgerRangeUnBoundedMode(t *testing.T) { + tests := []struct { + name string + config *Config + expected *Config + }{ + { + name: "Min start ledger 2", + config: &Config{StartLedger: 0, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + expected: &Config{StartLedger: 2, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + }, + { + name: "No change, 1 ledger per file", + config: &Config{StartLedger: 2, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + expected: &Config{StartLedger: 2, ExporterConfig: ExporterConfig{LedgersPerFile: 1}}, + }, + { + name: "Round down start ledger, 15 ledgers per file ", + config: &Config{StartLedger: 4, ExporterConfig: ExporterConfig{LedgersPerFile: 15}}, + expected: &Config{StartLedger: 2, ExporterConfig: ExporterConfig{LedgersPerFile: 15}}, + }, + { + name: "Round down start ledger, 64 ledgers per file ", + config: &Config{StartLedger: 400, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + expected: &Config{StartLedger: 384, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + }, + { + name: "No change, 64 ledger per file", + config: &Config{StartLedger: 64, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + expected: &Config{StartLedger: 64, ExporterConfig: ExporterConfig{LedgersPerFile: 64}}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.config.adjustLedgerRange() + require.EqualValues(t, int(tt.expected.StartLedger), int(tt.config.StartLedger)) + require.EqualValues(t, int(tt.expected.EndLedger), int(tt.config.EndLedger)) + }) + } +} diff --git a/exp/services/ledgerexporter/internal/datastore.go b/exp/services/ledgerexporter/internal/datastore.go new file mode 100644 index 0000000000..0367e9008e --- /dev/null +++ b/exp/services/ledgerexporter/internal/datastore.go @@ -0,0 +1,57 @@ +package ledgerexporter + +import ( + "context" + "io" + "strings" + + "cloud.google.com/go/storage" + 
"github.com/stellar/go/support/errors" + "github.com/stellar/go/support/url" + "google.golang.org/api/option" +) + +// DataStore defines an interface for interacting with data storage +type DataStore interface { + GetFile(ctx context.Context, path string) (io.ReadCloser, error) + PutFile(ctx context.Context, path string, in io.WriterTo) error + PutFileIfNotExists(ctx context.Context, path string, in io.WriterTo) error + Exists(ctx context.Context, path string) (bool, error) + Size(ctx context.Context, path string) (int64, error) + Close() error +} + +// NewDataStore creates a new DataStore based on the destination URL. +// Currently, only accepts GCS URLs. +func NewDataStore(ctx context.Context, destinationURL string) (DataStore, error) { + parsed, err := url.Parse(destinationURL) + if err != nil { + return nil, err + } + + pth := parsed.Path + if parsed.Scheme != "gcs" { + return nil, errors.Errorf("Invalid destination URL %s. Expected GCS URL ", destinationURL) + } + + // Inside gcs, all paths start _without_ the leading / + pth = strings.TrimPrefix(pth, "/") + bucketName := parsed.Host + prefix := pth + + logger.Infof("creating GCS client for bucket: %s, prefix: %s", bucketName, prefix) + + var options []option.ClientOption + client, err := storage.NewClient(ctx, options...) 
+ if err != nil { + return nil, err + } + + // Check the bucket exists + bucket := client.Bucket(bucketName) + if _, err := bucket.Attrs(ctx); err != nil { + return nil, errors.Wrap(err, "failed to retrieve bucket attributes") + } + + return &GCSDataStore{client: client, bucket: bucket, prefix: prefix}, nil +} diff --git a/exp/services/ledgerexporter/internal/exportmanager.go b/exp/services/ledgerexporter/internal/exportmanager.go new file mode 100644 index 0000000000..de322aa30c --- /dev/null +++ b/exp/services/ledgerexporter/internal/exportmanager.go @@ -0,0 +1,114 @@ +package ledgerexporter + +import ( + "context" + + "github.com/pkg/errors" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/xdr" +) + +type ExporterConfig struct { + LedgersPerFile uint32 `toml:"ledgers_per_file"` + FilesPerPartition uint32 `toml:"files_per_partition"` +} + +// ExportManager manages the creation and handling of export objects. +type ExportManager interface { + GetMetaArchiveChannel() chan *LedgerMetaArchive + Run(ctx context.Context, startLedger uint32, endLedger uint32) error + AddLedgerCloseMeta(ctx context.Context, ledgerCloseMeta xdr.LedgerCloseMeta) error +} + +type exportManager struct { + config ExporterConfig + ledgerBackend ledgerbackend.LedgerBackend + currentMetaArchive *LedgerMetaArchive + metaArchiveCh chan *LedgerMetaArchive +} + +// NewExportManager creates a new ExportManager with the provided configuration. +func NewExportManager(config ExporterConfig, backend ledgerbackend.LedgerBackend) ExportManager { + return &exportManager{ + config: config, + ledgerBackend: backend, + metaArchiveCh: make(chan *LedgerMetaArchive, 1), + } +} + +// GetMetaArchiveChannel returns a channel that receives LedgerMetaArchive objects. 
+func (e *exportManager) GetMetaArchiveChannel() chan *LedgerMetaArchive { + return e.metaArchiveCh +} + +// AddLedgerCloseMeta adds ledger metadata to the current export object +func (e *exportManager) AddLedgerCloseMeta(ctx context.Context, ledgerCloseMeta xdr.LedgerCloseMeta) error { + ledgerSeq := ledgerCloseMeta.LedgerSequence() + + // Determine the object key for the given ledger sequence + objectKey, err := GetObjectKeyFromSequenceNumber(e.config, ledgerSeq) + if err != nil { + return errors.Wrapf(err, "failed to get object key for ledger %d", ledgerSeq) + } + if e.currentMetaArchive != nil && e.currentMetaArchive.GetObjectKey() != objectKey { + return errors.New("Current meta archive object key mismatch") + } + if e.currentMetaArchive == nil { + endSeq := ledgerSeq + e.config.LedgersPerFile - 1 + if ledgerSeq < e.config.LedgersPerFile { + // Special case: Adjust the end ledger sequence for the first batch. + // Since the start ledger is 2 instead of 0, we want to ensure that the end ledger sequence + // does not exceed LedgersPerFile. + // For example, if LedgersPerFile is 64, the file name for the first batch should be 0-63, not 2-66. + endSeq = e.config.LedgersPerFile - 1 + } + + // Create a new LedgerMetaArchive and add it to the map. + e.currentMetaArchive = NewLedgerMetaArchive(objectKey, ledgerSeq, endSeq) + } + + err = e.currentMetaArchive.AddLedger(ledgerCloseMeta) + if err != nil { + return errors.Wrapf(err, "failed to add ledger %d", ledgerSeq) + } + + if ledgerSeq >= e.currentMetaArchive.GetEndLedgerSequence() { + // Current archive is full, send it for upload + select { + case e.metaArchiveCh <- e.currentMetaArchive: + e.currentMetaArchive = nil + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} + +// Run iterates over the specified range of ledgers, retrieves ledger data +// from the backend, and processes the corresponding ledger close metadata. 
+// The process continues until the ending ledger number is reached or a cancellation +// signal is received. +func (e *exportManager) Run(ctx context.Context, startLedger, endLedger uint32) error { + + // Close the object channel + defer close(e.metaArchiveCh) + + for nextLedger := startLedger; endLedger < 1 || nextLedger <= endLedger; nextLedger++ { + select { + case <-ctx.Done(): + logger.Info("Stopping ExportManager due to context cancellation") + return ctx.Err() + default: + ledgerCloseMeta, err := e.ledgerBackend.GetLedger(ctx, nextLedger) + if err != nil { + return errors.Wrapf(err, "failed to retrieve ledger %d from the ledger backend", nextLedger) + } + err = e.AddLedgerCloseMeta(ctx, ledgerCloseMeta) + if err != nil { + return errors.Wrapf(err, "failed to add ledgerCloseMeta for ledger %d", nextLedger) + } + } + } + logger.Infof("ExportManager successfully exported ledgers from %d to %d", startLedger, endLedger) + return nil +} diff --git a/exp/services/ledgerexporter/internal/exportmanager_test.go b/exp/services/ledgerexporter/internal/exportmanager_test.go new file mode 100644 index 0000000000..f6f330ec08 --- /dev/null +++ b/exp/services/ledgerexporter/internal/exportmanager_test.go @@ -0,0 +1,156 @@ +package ledgerexporter + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/collections/set" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func TestExporterSuite(t *testing.T) { + suite.Run(t, new(ExportManagerSuite)) +} + +// ExportManagerSuite is a test suite for the ExportManager. 
+type ExportManagerSuite struct { + suite.Suite + ctx context.Context + mockBackend ledgerbackend.MockDatabaseBackend +} + +func (s *ExportManagerSuite) SetupTest() { + s.ctx = context.Background() + s.mockBackend = ledgerbackend.MockDatabaseBackend{} +} + +func (s *ExportManagerSuite) TearDownTest() { + s.mockBackend.AssertExpectations(s.T()) +} + +func (s *ExportManagerSuite) TestRun() { + config := ExporterConfig{LedgersPerFile: 64, FilesPerPartition: 10} + exporter := NewExportManager(config, &s.mockBackend) + + start := uint32(0) + end := uint32(255) + expectedKeys := set.NewSet[string](10) + for i := start; i <= end; i++ { + s.mockBackend.On("GetLedger", s.ctx, i). + Return(createLedgerCloseMeta(i), nil) + key, _ := GetObjectKeyFromSequenceNumber(config, i) + expectedKeys.Add(key) + } + + actualKeys := set.NewSet[string](10) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for v := range exporter.GetMetaArchiveChannel() { + actualKeys.Add(v.objectKey) + } + }() + + err := exporter.Run(s.ctx, start, end) + require.NoError(s.T(), err) + + wg.Wait() + + require.Equal(s.T(), expectedKeys, actualKeys) +} + +func (s *ExportManagerSuite) TestRunContextCancel() { + config := ExporterConfig{LedgersPerFile: 1, FilesPerPartition: 1} + exporter := NewExportManager(config, &s.mockBackend) + ctx, cancel := context.WithCancel(context.Background()) + + s.mockBackend.On("GetLedger", mock.Anything, mock.Anything). 
+ Return(createLedgerCloseMeta(1), nil) + + go func() { + <-time.After(time.Second * 1) + cancel() + }() + + go func() { + ch := exporter.GetMetaArchiveChannel() + for i := 0; i < 127; i++ { + <-ch + } + }() + + err := exporter.Run(ctx, 0, 255) + require.EqualError(s.T(), err, "failed to add ledgerCloseMeta for ledger 128: context canceled") + +} + +func (s *ExportManagerSuite) TestRunWithCanceledContext() { + config := ExporterConfig{LedgersPerFile: 1, FilesPerPartition: 10} + exporter := NewExportManager(config, &s.mockBackend) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err := exporter.Run(ctx, 1, 10) + require.EqualError(s.T(), err, "context canceled") +} + +func (s *ExportManagerSuite) TestAddLedgerCloseMeta() { + config := ExporterConfig{LedgersPerFile: 1, FilesPerPartition: 10} + exporter := NewExportManager(config, &s.mockBackend) + objectCh := exporter.GetMetaArchiveChannel() + expectedkeys := set.NewSet[string](10) + actualKeys := set.NewSet[string](10) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for v := range objectCh { + actualKeys.Add(v.objectKey) + } + }() + + start := uint32(0) + end := uint32(255) + for i := start; i <= end; i++ { + require.NoError(s.T(), exporter.AddLedgerCloseMeta(context.Background(), createLedgerCloseMeta(i))) + + key, err := GetObjectKeyFromSequenceNumber(config, i) + require.NoError(s.T(), err) + expectedkeys.Add(key) + } + + close(objectCh) + wg.Wait() + require.Equal(s.T(), expectedkeys, actualKeys) +} + +func (s *ExportManagerSuite) TestAddLedgerCloseMetaContextCancel() { + config := ExporterConfig{LedgersPerFile: 1, FilesPerPartition: 10} + exporter := NewExportManager(config, &s.mockBackend) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-time.After(time.Second * 1) + cancel() + }() + + require.NoError(s.T(), exporter.AddLedgerCloseMeta(ctx, createLedgerCloseMeta(1))) + err := exporter.AddLedgerCloseMeta(ctx, createLedgerCloseMeta(2)) + 
require.EqualError(s.T(), err, "context canceled") +} + +func (s *ExportManagerSuite) TestAddLedgerCloseMetaKeyMismatch() { + config := ExporterConfig{LedgersPerFile: 10, FilesPerPartition: 1} + exporter := NewExportManager(config, &s.mockBackend) + + require.NoError(s.T(), exporter.AddLedgerCloseMeta(context.Background(), createLedgerCloseMeta(16))) + require.EqualError(s.T(), exporter.AddLedgerCloseMeta(context.Background(), createLedgerCloseMeta(21)), + "Current meta archive object key mismatch") +} diff --git a/exp/services/ledgerexporter/internal/gcs_datastore.go b/exp/services/ledgerexporter/internal/gcs_datastore.go new file mode 100644 index 0000000000..4fa1287e94 --- /dev/null +++ b/exp/services/ledgerexporter/internal/gcs_datastore.go @@ -0,0 +1,105 @@ +package ledgerexporter + +import ( + "context" + "io" + "net/http" + "os" + "path" + + "google.golang.org/api/googleapi" + + "cloud.google.com/go/storage" + "github.com/stellar/go/support/errors" +) + +// GCSDataStore implements DataStore for GCS +type GCSDataStore struct { + client *storage.Client + bucket *storage.BucketHandle + prefix string +} + +// GetFile retrieves a file from the GCS bucket. +func (b *GCSDataStore) GetFile(ctx context.Context, filePath string) (io.ReadCloser, error) { + filePath = path.Join(b.prefix, filePath) + r, err := b.bucket.Object(filePath).NewReader(ctx) + if err != nil { + if gcsError, ok := err.(*googleapi.Error); ok { + logger.Errorf("GCS error: %s %s", gcsError.Message, gcsError.Body) + } + return nil, errors.Wrapf(err, "error retrieving file: %s", filePath) + } + logger.Infof("File retrieved successfully: %s", filePath) + return r, nil +} + +// PutFileIfNotExists uploads a file to GCS only if it doesn't already exist. 
+func (b *GCSDataStore) PutFileIfNotExists(ctx context.Context, filePath string, in io.WriterTo) error { + err := b.putFile(ctx, filePath, in, &storage.Conditions{DoesNotExist: true}) + if err != nil { + if gcsError, ok := err.(*googleapi.Error); ok { + switch gcsError.Code { + case http.StatusPreconditionFailed: + logger.Infof("Precondition failed: %s already exists in the bucket", filePath) + return nil // Treat as success + default: + logger.Errorf("GCS error: %s %s", gcsError.Message, gcsError.Body) + } + } + return errors.Wrapf(err, "error uploading file: %s", filePath) + } + logger.Infof("File uploaded successfully: %s", filePath) + return nil +} + +// PutFile uploads a file to GCS +func (b *GCSDataStore) PutFile(ctx context.Context, filePath string, in io.WriterTo) error { + err := b.putFile(ctx, filePath, in, nil) // No conditions for regular PutFile + + if err != nil { + if gcsError, ok := err.(*googleapi.Error); ok { + logger.Errorf("GCS error: %s %s", gcsError.Message, gcsError.Body) + } + return errors.Wrapf(err, "error uploading file: %v", filePath) + } + logger.Infof("File uploaded successfully: %s", filePath) + return nil +} + +// Size retrieves the size of a file in the GCS bucket. +func (b *GCSDataStore) Size(ctx context.Context, pth string) (int64, error) { + pth = path.Join(b.prefix, pth) + attrs, err := b.bucket.Object(pth).Attrs(ctx) + if err == storage.ErrObjectNotExist { + err = os.ErrNotExist + } + if err != nil { + return 0, err + } + return attrs.Size, nil +} + +// Exists checks if a file exists in the GCS bucket. +func (b *GCSDataStore) Exists(ctx context.Context, pth string) (bool, error) { + _, err := b.Size(ctx, pth) + return err == nil, err +} + +// Close closes the GCS client connection. 
+func (b *GCSDataStore) Close() error { + return b.client.Close() +} + +func (b *GCSDataStore) putFile(ctx context.Context, filePath string, in io.WriterTo, conditions *storage.Conditions) error { + filePath = path.Join(b.prefix, filePath) + o := b.bucket.Object(filePath) + if conditions != nil { + o = o.If(*conditions) + } + w := o.NewWriter(ctx) + if _, err := in.WriteTo(w); err != nil { + return errors.Wrapf(err, "failed to put file: %s", filePath) + } + return w.Close() +} diff --git a/exp/services/ledgerexporter/internal/ledger_meta_archive.go b/exp/services/ledgerexporter/internal/ledger_meta_archive.go new file mode 100644 index 0000000000..2a193f812c --- /dev/null +++ b/exp/services/ledgerexporter/internal/ledger_meta_archive.go @@ -0,0 +1,65 @@ +package ledgerexporter + +import ( + "fmt" + + "github.com/stellar/go/xdr" +) + +// LedgerMetaArchive represents a file with metadata and binary data. +type LedgerMetaArchive struct { + // file name + objectKey string + // Actual binary data + data xdr.LedgerCloseMetaBatch +} + +// NewLedgerMetaArchive creates a new LedgerMetaArchive instance. +func NewLedgerMetaArchive(key string, startSeq uint32, endSeq uint32) *LedgerMetaArchive { + return &LedgerMetaArchive{ + objectKey: key, + data: xdr.LedgerCloseMetaBatch{ + StartSequence: xdr.Uint32(startSeq), + EndSequence: xdr.Uint32(endSeq), + }, + } +} + +// AddLedger adds a LedgerCloseMeta to the archive. 
+func (f *LedgerMetaArchive) AddLedger(ledgerCloseMeta xdr.LedgerCloseMeta) error { + if ledgerCloseMeta.LedgerSequence() < uint32(f.data.StartSequence) || + ledgerCloseMeta.LedgerSequence() > uint32(f.data.EndSequence) { + return fmt.Errorf("ledger sequence %d is outside valid range [%d, %d]", + ledgerCloseMeta.LedgerSequence(), f.data.StartSequence, f.data.EndSequence) + } + + if len(f.data.LedgerCloseMetas) > 0 { + lastSequence := f.data.LedgerCloseMetas[len(f.data.LedgerCloseMetas)-1].LedgerSequence() + if ledgerCloseMeta.LedgerSequence() != lastSequence+1 { + return fmt.Errorf("ledgers must be added sequentially: expected sequence %d, got %d", + lastSequence+1, ledgerCloseMeta.LedgerSequence()) + } + } + f.data.LedgerCloseMetas = append(f.data.LedgerCloseMetas, ledgerCloseMeta) + return nil +} + +// GetLedgerCount returns the number of ledgers currently in the archive. +func (f *LedgerMetaArchive) GetLedgerCount() uint32 { + return uint32(len(f.data.LedgerCloseMetas)) +} + +// GetStartLedgerSequence returns the starting ledger sequence of the archive. +func (f *LedgerMetaArchive) GetStartLedgerSequence() uint32 { + return uint32(f.data.StartSequence) +} + +// GetEndLedgerSequence returns the ending ledger sequence of the archive. +func (f *LedgerMetaArchive) GetEndLedgerSequence() uint32 { + return uint32(f.data.EndSequence) +} + +// GetObjectKey returns the object key of the archive. 
+func (f *LedgerMetaArchive) GetObjectKey() string { + return f.objectKey +} diff --git a/exp/services/ledgerexporter/internal/ledger_meta_archive_test.go b/exp/services/ledgerexporter/internal/ledger_meta_archive_test.go new file mode 100644 index 0000000000..3403cbaafa --- /dev/null +++ b/exp/services/ledgerexporter/internal/ledger_meta_archive_test.go @@ -0,0 +1,84 @@ +package ledgerexporter + +import ( + "fmt" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/require" +) + +func createLedgerCloseMeta(ledgerSeq uint32) xdr.LedgerCloseMeta { + return xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(ledgerSeq), + }, + }, + }, + } +} + +func TestLedgerMetaArchive_AddLedgerValidRange(t *testing.T) { + + tests := []struct { + name string + startSeq uint32 + endSeq uint32 + seqNum uint32 + errMsg string + }{ + {startSeq: 10, endSeq: 100, seqNum: 10, errMsg: ""}, + {startSeq: 10, endSeq: 100, seqNum: 11, errMsg: ""}, + {startSeq: 10, endSeq: 100, seqNum: 99, errMsg: ""}, + {startSeq: 10, endSeq: 100, seqNum: 100, errMsg: ""}, + {startSeq: 10, endSeq: 100, seqNum: 9, errMsg: "ledger sequence 9 is outside valid range [10, 100]"}, + {startSeq: 10, endSeq: 100, seqNum: 101, errMsg: "ledger sequence 101 is outside valid range [10, 100]"}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("range [%d, %d]: Add seq %d", tt.startSeq, tt.endSeq, tt.seqNum), + func(t *testing.T) { + f := NewLedgerMetaArchive("", tt.startSeq, tt.endSeq) + err := f.AddLedger(createLedgerCloseMeta(tt.seqNum)) + if tt.errMsg != "" { + require.EqualError(t, err, tt.errMsg) + } else { + require.NoError(t, err) + } + }) + } +} +func TestLedgerMetaArchive_AddLedgerSequential(t *testing.T) { + var start, end uint32 = 1, 100 + f := NewLedgerMetaArchive("", start, end+100) + + // Add ledgers sequentially + for i := start; i <= end; i++ { + require.NoError(t, 
f.AddLedger(createLedgerCloseMeta(i))) + } + + // Test out of sequence + testCases := []struct { + ledgerSeq uint32 + expectedErrMsg string + }{ + { + end + 2, + fmt.Sprintf("ledgers must be added sequentially: expected sequence %d, got %d", end+1, end+2), + }, + { + end, + fmt.Sprintf("ledgers must be added sequentially: expected sequence %d, got %d", end+1, end), + }, + { + end - 1, + fmt.Sprintf("ledgers must be added sequentially: expected sequence %d, got %d", end+1, end-1), + }, + } + + for _, tc := range testCases { + err := f.AddLedger(createLedgerCloseMeta(tc.ledgerSeq)) + require.EqualError(t, err, tc.expectedErrMsg) + } +} diff --git a/exp/services/ledgerexporter/internal/mock_datastore.go b/exp/services/ledgerexporter/internal/mock_datastore.go new file mode 100644 index 0000000000..7675a87461 --- /dev/null +++ b/exp/services/ledgerexporter/internal/mock_datastore.go @@ -0,0 +1,43 @@ +package ledgerexporter + +import ( + "context" + "io" + + "github.com/stretchr/testify/mock" +) + +// MockDataStore is a mock implementation for the Storage interface. 
+type MockDataStore struct { + mock.Mock +} + +func (m *MockDataStore) Exists(ctx context.Context, path string) (bool, error) { + args := m.Called(ctx, path) + return args.Bool(0), args.Error(1) +} + +func (m *MockDataStore) Size(ctx context.Context, path string) (int64, error) { + args := m.Called(ctx, path) + return args.Get(0).(int64), args.Error(1) +} + +func (m *MockDataStore) GetFile(ctx context.Context, path string) (io.ReadCloser, error) { + args := m.Called(ctx, path) + return args.Get(0).(io.ReadCloser), args.Error(1) +} + +func (m *MockDataStore) PutFile(ctx context.Context, path string, in io.WriterTo) error { + args := m.Called(ctx, path, in) + return args.Error(0) +} + +func (m *MockDataStore) PutFileIfNotExists(ctx context.Context, path string, in io.WriterTo) error { + args := m.Called(ctx, path, in) + return args.Error(0) +} + +func (m *MockDataStore) Close() error { + args := m.Called() + return args.Error(0) +} diff --git a/exp/services/ledgerexporter/internal/uploader.go b/exp/services/ledgerexporter/internal/uploader.go new file mode 100644 index 0000000000..633db4ead7 --- /dev/null +++ b/exp/services/ledgerexporter/internal/uploader.go @@ -0,0 +1,74 @@ +package ledgerexporter + +import ( + "context" + "time" + + "github.com/pkg/errors" +) + +// Uploader is responsible for uploading data to a storage destination. +type Uploader interface { + Run(ctx context.Context) error + Upload(ctx context.Context, metaArchive *LedgerMetaArchive) error +} + +type uploader struct { + dataStore DataStore + metaArchiveCh chan *LedgerMetaArchive +} + +func NewUploader(destination DataStore, metaArchiveCh chan *LedgerMetaArchive) Uploader { + return &uploader{ + dataStore: destination, + metaArchiveCh: metaArchiveCh, + } +} + +// Upload uploads the serialized binary data of ledger TxMeta to the specified destination. +// TODO: Add retry logic. 
+func (u *uploader) Upload(ctx context.Context, metaArchive *LedgerMetaArchive) error { + logger.Infof("Uploading: %s", metaArchive.GetObjectKey()) + + err := u.dataStore.PutFileIfNotExists(ctx, metaArchive.GetObjectKey(), + &XDRGzipEncoder{XdrPayload: &metaArchive.data}) + if err != nil { + return errors.Wrapf(err, "error uploading %s", metaArchive.GetObjectKey()) + } + return nil +} + +// TODO: make it configurable +var uploaderShutdownWaitTime = 10 * time.Second + +// Run starts the uploader, continuously listening for LedgerMetaArchive objects to upload. +func (u *uploader) Run(ctx context.Context) error { + uploadCtx, cancel := context.WithCancel(context.Background()) + go func() { + <-ctx.Done() + logger.Info("Context done, waiting for remaining uploads to complete...") + // wait for a few seconds to upload remaining objects from metaArchiveCh + <-time.After(uploaderShutdownWaitTime) + logger.Info("Timeout reached, canceling remaining uploads...") + cancel() + }() + + for { + select { + case <-uploadCtx.Done(): + return uploadCtx.Err() + + case metaObject, ok := <-u.metaArchiveCh: + if !ok { + logger.Info("Meta archive channel closed, stopping uploader") + return nil + } + //Upload the received LedgerMetaArchive. 
+ err := u.Upload(uploadCtx, metaObject) + if err != nil { + return err + } + logger.Infof("Uploaded %s successfully", metaObject.objectKey) + } + } +} diff --git a/exp/services/ledgerexporter/internal/uploader_test.go b/exp/services/ledgerexporter/internal/uploader_test.go new file mode 100644 index 0000000000..c2a0fb96ab --- /dev/null +++ b/exp/services/ledgerexporter/internal/uploader_test.go @@ -0,0 +1,127 @@ +package ledgerexporter + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func TestUploaderSuite(t *testing.T) { + suite.Run(t, new(UploaderSuite)) +} + +// UploaderSuite is a test suite for the Uploader. +type UploaderSuite struct { + suite.Suite + ctx context.Context + mockDataStore MockDataStore +} + +func (s *UploaderSuite) SetupTest() { + s.ctx = context.Background() + s.mockDataStore = MockDataStore{} +} + +func (s *UploaderSuite) TestUpload() { + key, start, end := "test-1-100", uint32(1), uint32(100) + archive := NewLedgerMetaArchive(key, start, end) + for i := start; i <= end; i++ { + _ = archive.AddLedger(createLedgerCloseMeta(i)) + } + + var capturedWriterTo io.WriterTo + var capturedKey string + s.mockDataStore.On("PutFileIfNotExists", mock.Anything, key, mock.Anything). 
+ Run(func(args mock.Arguments) { + capturedKey = args.Get(1).(string) + capturedWriterTo = args.Get(2).(io.WriterTo) + }).Return(nil).Once() + + dataUploader := uploader{dataStore: &s.mockDataStore} + require.NoError(s.T(), dataUploader.Upload(context.Background(), archive)) + + var capturedBuf bytes.Buffer + _, err := capturedWriterTo.WriteTo(&capturedBuf) + require.NoError(s.T(), err) + + var decodedArchive LedgerMetaArchive + decoder := &XDRGzipDecoder{XdrPayload: &decodedArchive.data} + _, err = decoder.ReadFrom(&capturedBuf) + require.NoError(s.T(), err) + + // require that the decoded data matches the original test data + require.Equal(s.T(), key, capturedKey) + require.Equal(s.T(), archive.data, decodedArchive.data) +} + +func (s *UploaderSuite) TestUploadPutError() { + key, start, end := "test-1-100", uint32(1), uint32(100) + archive := NewLedgerMetaArchive(key, start, end) + + s.mockDataStore.On("PutFileIfNotExists", context.Background(), key, + mock.Anything).Return(errors.New("error in PutFileIfNotExists")) + + dataUploader := uploader{dataStore: &s.mockDataStore} + err := dataUploader.Upload(context.Background(), archive) + require.Equal(s.T(), fmt.Sprintf("error uploading %s: error in PutFileIfNotExists", key), err.Error()) +} + +func (s *UploaderSuite) TestRunChannelClose() { + s.mockDataStore.On("PutFileIfNotExists", mock.Anything, + mock.Anything, mock.Anything).Return(nil) + + objectCh := make(chan *LedgerMetaArchive, 1) + go func() { + key, start, end := "test", uint32(1), uint32(100) + for i := start; i <= end; i++ { + objectCh <- NewLedgerMetaArchive(key, i, i) + } + <-time.After(time.Second * 2) + close(objectCh) + }() + + dataUploader := uploader{dataStore: &s.mockDataStore, metaArchiveCh: objectCh} + require.NoError(s.T(), dataUploader.Run(context.Background())) +} + +func (s *UploaderSuite) TestRunContextCancel() { + objectCh := make(chan *LedgerMetaArchive, 1) + s.mockDataStore.On("PutFileIfNotExists", mock.Anything, mock.Anything, 
mock.Anything).Return(nil) + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + for { + objectCh <- NewLedgerMetaArchive("test", 1, 1) + } + }() + + go func() { + <-time.After(time.Second * 2) + cancel() + }() + + dataUploader := uploader{dataStore: &s.mockDataStore, metaArchiveCh: objectCh} + err := dataUploader.Run(ctx) + + require.EqualError(s.T(), err, "context canceled") +} + +func (s *UploaderSuite) TestRunUploadError() { + objectCh := make(chan *LedgerMetaArchive, 10) + objectCh <- NewLedgerMetaArchive("test", 1, 1) + + s.mockDataStore.On("PutFileIfNotExists", mock.Anything, "test", + mock.Anything).Return(errors.New("Put error")) + + dataUploader := uploader{dataStore: &s.mockDataStore, metaArchiveCh: objectCh} + err := dataUploader.Run(context.Background()) + require.Equal(s.T(), "error uploading test: Put error", err.Error()) +} diff --git a/exp/services/ledgerexporter/internal/utils.go b/exp/services/ledgerexporter/internal/utils.go new file mode 100644 index 0000000000..d1bc8e20d1 --- /dev/null +++ b/exp/services/ledgerexporter/internal/utils.go @@ -0,0 +1,104 @@ +package ledgerexporter + +import ( + "compress/gzip" + "fmt" + "io" + + xdr3 "github.com/stellar/go-xdr/xdr3" + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/storage" +) + +const ( + fileSuffix = ".xdr.gz" +) + +// GetObjectKeyFromSequenceNumber generates the file name from the ledger sequence number based on configuration. 
+func GetObjectKeyFromSequenceNumber(config ExporterConfig, ledgerSeq uint32) (string, error) { + var objectKey string + + if config.LedgersPerFile < 1 { + return "", errors.Errorf("Invalid ledgers per file (%d): must be at least 1", config.LedgersPerFile) + } + + if config.FilesPerPartition > 1 { + partitionSize := config.LedgersPerFile * config.FilesPerPartition + partitionStart := (ledgerSeq / partitionSize) * partitionSize + partitionEnd := partitionStart + partitionSize - 1 + objectKey = fmt.Sprintf("%d-%d/", partitionStart, partitionEnd) + } + + fileStart := (ledgerSeq / config.LedgersPerFile) * config.LedgersPerFile + fileEnd := fileStart + config.LedgersPerFile - 1 + objectKey += fmt.Sprintf("%d", fileStart) + + // Multiple ledgers per file + if fileStart != fileEnd { + objectKey += fmt.Sprintf("-%d", fileEnd) + } + objectKey += fileSuffix + + return objectKey, nil +} + +// getLatestLedgerSequenceFromHistoryArchives returns the most recent ledger sequence (checkpoint ledger) +// number present in the history archives. 
+func getLatestLedgerSequenceFromHistoryArchives(historyArchivesURLs []string) (uint32, error) { + for _, historyArchiveURL := range historyArchivesURLs { + ha, err := historyarchive.Connect( + historyArchiveURL, + historyarchive.ArchiveOptions{ + ConnectOptions: storage.ConnectOptions{ + UserAgent: "ledger-exporter", + }, + }, + ) + if err != nil { + logger.WithError(err).Warnf("Error connecting to history archive %s", historyArchiveURL) + continue // Skip to next archive + } + + has, err := ha.GetRootHAS() + if err != nil { + logger.WithError(err).Warnf("Error getting RootHAS for %s", historyArchiveURL) + continue // Skip to next archive + } + + return has.CurrentLedger, nil + } + + return 0, errors.New("failed to retrieve the latest ledger sequence from any history archive") +} + +type XDRGzipEncoder struct { + XdrPayload interface{} +} + +func (g *XDRGzipEncoder) WriteTo(w io.Writer) (int64, error) { + gw := gzip.NewWriter(w) + n, err := xdr3.Marshal(gw, g.XdrPayload) + if err != nil { + return int64(n), err + } + return int64(n), gw.Close() +} + +type XDRGzipDecoder struct { + XdrPayload interface{} +} + +func (d *XDRGzipDecoder) ReadFrom(r io.Reader) (int64, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return 0, err + } + defer gr.Close() + + n, err := xdr3.Unmarshal(gr, d.XdrPayload) + if err != nil { + return int64(n), err + } + return int64(n), nil +} diff --git a/exp/services/ledgerexporter/internal/utils_test.go b/exp/services/ledgerexporter/internal/utils_test.go new file mode 100644 index 0000000000..c11b500c21 --- /dev/null +++ b/exp/services/ledgerexporter/internal/utils_test.go @@ -0,0 +1,87 @@ +package ledgerexporter + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/require" +) + +func TestGetObjectKeyFromSequenceNumber(t *testing.T) { + testCases := []struct { + filesPerPartition uint32 + ledgerSeq uint32 + ledgersPerFile uint32 + expectedKey string + expectedError bool + }{ + 
{0, 5, 1, "5.xdr.gz", false}, + {0, 5, 10, "0-9.xdr.gz", false}, + {2, 5, 0, "", true}, + {2, 10, 100, "0-199/0-99.xdr.gz", false}, + {2, 150, 50, "100-199/150-199.xdr.gz", false}, + {2, 300, 200, "0-399/200-399.xdr.gz", false}, + {2, 1, 1, "0-1/1.xdr.gz", false}, + {4, 10, 100, "0-399/0-99.xdr.gz", false}, + {4, 250, 50, "200-399/250-299.xdr.gz", false}, + {1, 300, 200, "200-399.xdr.gz", false}, + {1, 1, 1, "1.xdr.gz", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("LedgerSeq-%d-LedgersPerFile-%d", tc.ledgerSeq, tc.ledgersPerFile), func(t *testing.T) { + config := ExporterConfig{FilesPerPartition: tc.filesPerPartition, LedgersPerFile: tc.ledgersPerFile} + key, err := GetObjectKeyFromSequenceNumber(config, tc.ledgerSeq) + + if tc.expectedError { + require.Error(t, err) + require.Empty(t, key) + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedKey, key) + } + }) + } +} + +func createTestLedgerCloseMetaBatch(startSeq, endSeq uint32, count int) xdr.LedgerCloseMetaBatch { + var ledgerCloseMetas []xdr.LedgerCloseMeta + for i := 0; i < count; i++ { + ledgerCloseMetas = append(ledgerCloseMetas, createLedgerCloseMeta(startSeq+uint32(i))) + } + return xdr.LedgerCloseMetaBatch{ + StartSequence: xdr.Uint32(startSeq), + EndSequence: xdr.Uint32(endSeq), + LedgerCloseMetas: ledgerCloseMetas, + } +} + +func TestEncodeDecodeLedgerCloseMetaBatch(t *testing.T) { + testData := createTestLedgerCloseMetaBatch(1000, 1005, 6) + + // Encode the test data + var encoder XDRGzipEncoder + encoder.XdrPayload = testData + + var buf bytes.Buffer + _, err := encoder.WriteTo(&buf) + require.NoError(t, err) + + // Decode the encoded data + var decoder XDRGzipDecoder + decoder.XdrPayload = &xdr.LedgerCloseMetaBatch{} + + _, err = decoder.ReadFrom(&buf) + require.NoError(t, err) + + // Check if the decoded data matches the original test data + decodedData := decoder.XdrPayload.(*xdr.LedgerCloseMetaBatch) + require.Equal(t, testData.StartSequence, 
decodedData.StartSequence) + require.Equal(t, testData.EndSequence, decodedData.EndSequence) + require.Equal(t, len(testData.LedgerCloseMetas), len(decodedData.LedgerCloseMetas)) + for i := range testData.LedgerCloseMetas { + require.Equal(t, testData.LedgerCloseMetas[i], decodedData.LedgerCloseMetas[i]) + } +} diff --git a/exp/services/ledgerexporter/main.go b/exp/services/ledgerexporter/main.go index 42cf1d6ae8..f1a81e95ba 100644 --- a/exp/services/ledgerexporter/main.go +++ b/exp/services/ledgerexporter/main.go @@ -1,180 +1,8 @@ package main -import ( - "bytes" - "context" - "flag" - "io" - "os" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stellar/go/ingest/ledgerbackend" - "github.com/stellar/go/network" - supportlog "github.com/stellar/go/support/log" - "github.com/stellar/go/support/storage" - "github.com/stellar/go/xdr" -) - -var logger = supportlog.New() +import exporter "github.com/stellar/go/exp/services/ledgerexporter/internal" func main() { - targetUrl := flag.String("target", "gcs://horizon-archive-poc", "history archive url to write txmeta files") - stellarCoreBinaryPath := flag.String("stellar-core-binary-path", os.Getenv("STELLAR_CORE_BINARY_PATH"), "path to the stellar core binary") - networkPassphrase := flag.String("network-passphrase", network.TestNetworkPassphrase, "network passphrase") - historyArchiveUrls := flag.String("history-archive-urls", "https://history.stellar.org/prd/core-testnet/core_testnet_001", "comma-separated list of history archive urls to read from") - captiveCoreTomlPath := flag.String("captive-core-toml-path", os.Getenv("CAPTIVE_CORE_TOML_PATH"), "path to load captive core toml file from") - startingLedger := flag.Uint("start-ledger", 2, "ledger to start export from") - continueFromLatestLedger := flag.Bool("continue", false, "start export from the last exported ledger (as indicated in the target's /latest path)") - endingLedger := flag.Uint("end-ledger", 0, "ledger at which to 
stop the export (must be a closed ledger), 0 means no ending") - writeLatestPath := flag.Bool("write-latest-path", true, "update the value of the /latest path on the target") - captiveCoreUseDb := flag.Bool("captive-core-use-db", true, "configure captive core to store database on disk in working directory rather than in memory") - flag.Parse() - - logger.SetLevel(supportlog.InfoLevel) - - params := ledgerbackend.CaptiveCoreTomlParams{ - NetworkPassphrase: *networkPassphrase, - HistoryArchiveURLs: strings.Split(*historyArchiveUrls, ","), - UseDB: *captiveCoreUseDb, - } - if *captiveCoreTomlPath == "" { - logger.Fatal("Missing -captive-core-toml-path flag") - } - - captiveCoreToml, err := ledgerbackend.NewCaptiveCoreTomlFromFile(*captiveCoreTomlPath, params) - logFatalIf(err, "Invalid captive core toml") - - captiveConfig := ledgerbackend.CaptiveCoreConfig{ - BinaryPath: *stellarCoreBinaryPath, - NetworkPassphrase: params.NetworkPassphrase, - HistoryArchiveURLs: params.HistoryArchiveURLs, - CheckpointFrequency: 64, - Log: logger.WithField("subservice", "stellar-core"), - Toml: captiveCoreToml, - UseDB: *captiveCoreUseDb, - } - core, err := ledgerbackend.NewCaptive(captiveConfig) - logFatalIf(err, "Could not create captive core instance") - - target, err := storage.ConnectBackend( - *targetUrl, - storage.ConnectOptions{ - Context: context.Background(), - S3WriteACL: s3.ObjectCannedACLBucketOwnerFullControl, - }, - ) - logFatalIf(err, "Could not connect to target") - defer target.Close() - - // Build the appropriate range for the given backend state. 
- startLedger := uint32(*startingLedger) - endLedger := uint32(*endingLedger) - - logger.Infof("processing requested range of -start-ledger=%v, -end-ledger=%v", startLedger, endLedger) - if *continueFromLatestLedger { - if startLedger != 0 { - logger.Fatalf("-start-ledger and -continue cannot both be set") - } - startLedger = readLatestLedger(target) - logger.Infof("continue flag was enabled, next ledger found was %v", startLedger) - } - - if startLedger < 2 { - logger.Fatalf("-start-ledger must be >= 2") - } - if endLedger != 0 && endLedger < startLedger { - logger.Fatalf("-end-ledger must be >= -start-ledger") - } - - var ledgerRange ledgerbackend.Range - if endLedger == 0 { - ledgerRange = ledgerbackend.UnboundedRange(startLedger) - } else { - ledgerRange = ledgerbackend.BoundedRange(startLedger, endLedger) - } - - logger.Infof("preparing to export %s", ledgerRange) - err = core.PrepareRange(context.Background(), ledgerRange) - logFatalIf(err, "could not prepare range") - - for nextLedger := startLedger; endLedger < 1 || nextLedger <= endLedger; { - ledger, err := core.GetLedger(context.Background(), nextLedger) - if err != nil { - logger.WithError(err).Warnf("could not fetch ledger %v, retrying", nextLedger) - time.Sleep(time.Second) - continue - } - - if err = writeLedger(target, ledger); err != nil { - logger.WithError(err).Warnf( - "could not write ledger object %v, retrying", - uint64(ledger.LedgerSequence())) - continue - } - - if *writeLatestPath { - if err = writeLatestLedger(target, nextLedger); err != nil { - logger.WithError(err).Warnf("could not write latest ledger %v", nextLedger) - } - } - - nextLedger++ - } - -} - -// readLatestLedger determines the latest ledger in the given backend (at the -// /latest path), defaulting to Ledger #2 if one doesn't exist -func readLatestLedger(backend storage.Storage) uint32 { - r, err := backend.GetFile("latest") - if os.IsNotExist(err) { - return 2 - } - - logFatalIf(err, "could not open latest ledger bucket") - 
defer r.Close() - - var buf bytes.Buffer - _, err = io.Copy(&buf, r) - logFatalIf(err, "could not read latest ledger") - - parsed, err := strconv.ParseUint(buf.String(), 10, 32) - logFatalIf(err, "could not parse latest ledger: %s", buf.String()) - return uint32(parsed) -} - -// writeLedger stores the given LedgerCloseMeta instance as a raw binary at the -// /ledgers/ path. If an error is returned, it may be transient so you -// should attempt to retry. -func writeLedger(backend storage.Storage, ledger xdr.LedgerCloseMeta) error { - toSerialize := xdr.SerializedLedgerCloseMeta{ - V: 0, - V0: &ledger, - } - blob, err := toSerialize.MarshalBinary() - logFatalIf(err, "could not serialize ledger %v", ledger.LedgerSequence()) - return backend.PutFile( - "ledgers/"+strconv.FormatUint(uint64(ledger.LedgerSequence()), 10), - io.NopCloser(bytes.NewReader(blob)), - ) -} - -func writeLatestLedger(backend storage.Storage, ledger uint32) error { - return backend.PutFile( - "latest", - io.NopCloser( - bytes.NewBufferString( - strconv.FormatUint(uint64(ledger), 10), - ), - ), - ) -} - -func logFatalIf(err error, message string, args ...interface{}) { - if err != nil { - logger.WithError(err).Fatalf(message, args...) 
- } + app := exporter.NewApp() + app.Run() } diff --git a/go.mod b/go.mod index 1ca1ea801c..0e55add07c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( cloud.google.com/go/firestore v1.14.0 // indirect - cloud.google.com/go/storage v1.30.1 + cloud.google.com/go/storage v1.37.0 firebase.google.com/go v3.12.0+incompatible github.com/2opremio/pretty v0.2.2-0.20230601220618-e1d5758b2a95 github.com/BurntSushi/toml v1.3.2 @@ -20,7 +20,7 @@ require ( github.com/go-chi/chi v4.1.2+incompatible github.com/go-errors/errors v1.5.1 github.com/golang-jwt/jwt v3.2.2+incompatible - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.5.0 github.com/gorilla/schema v1.2.0 github.com/graph-gophers/graphql-go v1.3.0 github.com/guregu/null v4.0.0+incompatible @@ -50,13 +50,13 @@ require ( github.com/stretchr/testify v1.8.4 github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8 github.com/xdrpp/goxdr v0.1.1 - google.golang.org/api v0.149.0 + google.golang.org/api v0.157.0 gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 gopkg.in/square/go-jose.v2 v2.4.1 gopkg.in/tylerb/graceful.v1 v1.2.15 ) -require golang.org/x/sync v0.4.0 +require golang.org/x/sync v0.6.0 require ( cloud.google.com/go/compute v1.23.3 // indirect @@ -67,9 +67,10 @@ require ( github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/creachadair/mds v0.0.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gobuffalo/packd v1.0.2 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect @@ -84,21 +85,23 @@ require ( github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.10.0 // indirect 
github.com/subosito/gotenv v1.6.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.13.0 // indirect golang.org/x/tools v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect gopkg.in/djherbis/atime.v1 v1.0.0 // indirect gopkg.in/djherbis/stream.v1 v1.3.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) require ( - cloud.google.com/go v0.111.0 // indirect + cloud.google.com/go v0.112.0 // indirect github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/buger/goreplay v1.3.2 @@ -139,16 +142,16 @@ require ( github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce // indirect github.com/yudai/pp v2.0.1+incompatible // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.16.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/term v0.16.0 // indirect 
golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 + golang.org/x/time v0.5.0 google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect + google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect google.golang.org/grpc v1.60.1 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index 6e4158f9b7..1e31cfdb08 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -47,8 +47,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= -cloud.google.com/go/storage v1.30.1/go.mod 
h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= +cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= firebase.google.com/go v3.12.0+incompatible h1:q70KCp/J0oOL8kJ8oV2j3646kV4TB8Y5IvxXC0WT1bo= firebase.google.com/go v3.12.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= @@ -94,6 +94,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/jrpc2 v1.1.0 h1:SgpJf0v1rVCZx68+4APv6dgsTFsIHlpgFD1NlQAWA0A= github.com/creachadair/jrpc2 v1.1.0/go.mod h1:5jN7MKwsm8qvgfTsTzLX3JIfidsAkZ1c8DZSQmp+g38= @@ -117,8 +118,11 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/fatih/structs v1.0.0 h1:BrX964Rv5uQ3wwS+KRUAJCBBw5PQmgJfJ6v4yly5QwU= github.com/fatih/structs 
v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= @@ -141,8 +145,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -227,8 +231,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid 
v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -455,13 +459,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 
h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -474,8 +482,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -552,8 +560,8 @@ golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -563,8 +571,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -576,8 +584,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -632,8 +640,8 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -649,8 +657,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -707,7 +715,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -727,8 +735,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= +google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -774,12 +782,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= -google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 h1:EWIeHfGuUf00zrVZGEgYFxok7plSAXBGcH7NNdMAWvA= -google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3/go.mod 
h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= +google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= +google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU= +google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/gxdr/xdr_generated.go b/gxdr/xdr_generated.go index 9d9ab290bb..d4cd83cdf0 100644 --- a/gxdr/xdr_generated.go +++ b/gxdr/xdr_generated.go @@ -1,4 +1,4 @@ -// Code generated by goxdr -p gxdr -enum-comments -o gxdr/xdr_generated.go xdr/Stellar-SCP.x xdr/Stellar-ledger-entries.x xdr/Stellar-ledger.x xdr/Stellar-overlay.x xdr/Stellar-transaction.x xdr/Stellar-types.x xdr/Stellar-contract-env-meta.x xdr/Stellar-contract-meta.x xdr/Stellar-contract-spec.x xdr/Stellar-contract.x xdr/Stellar-internal.x xdr/Stellar-contract-config-setting.x xdr/Stellar-lighthorizon.x; DO NOT EDIT. 
+// Code generated by goxdr -p gxdr -enum-comments -o gxdr/xdr_generated.go xdr/Stellar-SCP.x xdr/Stellar-ledger-entries.x xdr/Stellar-ledger.x xdr/Stellar-overlay.x xdr/Stellar-transaction.x xdr/Stellar-types.x xdr/Stellar-contract-env-meta.x xdr/Stellar-contract-meta.x xdr/Stellar-contract-spec.x xdr/Stellar-contract.x xdr/Stellar-internal.x xdr/Stellar-contract-config-setting.x xdr/Stellar-lighthorizon.x xdr/Stellar-exporter.x; DO NOT EDIT. package gxdr @@ -4445,6 +4445,16 @@ type SerializedLedgerCloseMeta struct { _u interface{} } +// Batch of ledgers along with their transaction metadata +type LedgerCloseMetaBatch struct { + // starting ledger sequence number in the batch + StartSequence Uint32 + // ending ledger sequence number in the batch + EndSequence Uint32 + // Ledger close meta for each ledger within the batch + LedgerCloseMetas []LedgerCloseMeta +} + // // Helper types and generated marshaling functions // @@ -29398,3 +29408,76 @@ func (u *SerializedLedgerCloseMeta) XdrRecurse(x XDR, name string) { XdrPanic("invalid V (%v) in SerializedLedgerCloseMeta", u.V) } func XDR_SerializedLedgerCloseMeta(v *SerializedLedgerCloseMeta) *SerializedLedgerCloseMeta { return v } + +type _XdrVec_unbounded_LedgerCloseMeta []LedgerCloseMeta + +func (_XdrVec_unbounded_LedgerCloseMeta) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_LedgerCloseMeta) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_LedgerCloseMeta length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_LedgerCloseMeta length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_LedgerCloseMeta) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_LedgerCloseMeta) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = 
(*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]LedgerCloseMeta, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_LedgerCloseMeta) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_LedgerCloseMeta(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_LedgerCloseMeta) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_LedgerCloseMeta) XdrTypeName() string { return "LedgerCloseMeta<>" } +func (v *_XdrVec_unbounded_LedgerCloseMeta) XdrPointer() interface{} { return (*[]LedgerCloseMeta)(v) } +func (v _XdrVec_unbounded_LedgerCloseMeta) XdrValue() interface{} { return ([]LedgerCloseMeta)(v) } +func (v *_XdrVec_unbounded_LedgerCloseMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerCloseMetaBatch = *LedgerCloseMetaBatch + +func (v *LedgerCloseMetaBatch) XdrPointer() interface{} { return v } +func (LedgerCloseMetaBatch) XdrTypeName() string { return "LedgerCloseMetaBatch" } +func (v LedgerCloseMetaBatch) XdrValue() interface{} { return v } +func (v *LedgerCloseMetaBatch) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerCloseMetaBatch) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sstartSequence", name), XDR_Uint32(&v.StartSequence)) + x.Marshal(x.Sprintf("%sendSequence", name), XDR_Uint32(&v.EndSequence)) + x.Marshal(x.Sprintf("%sledgerCloseMetas", name), 
(*_XdrVec_unbounded_LedgerCloseMeta)(&v.LedgerCloseMetas)) +} +func XDR_LedgerCloseMetaBatch(v *LedgerCloseMetaBatch) *LedgerCloseMetaBatch { return v } diff --git a/ingest/ledgerbackend/configs/captive-core-pubnet.cfg b/ingest/ledgerbackend/configs/captive-core-pubnet.cfg new file mode 100644 index 0000000000..f8b9a33985 --- /dev/null +++ b/ingest/ledgerbackend/configs/captive-core-pubnet.cfg @@ -0,0 +1,195 @@ +# WARNING! Do not use this config in production. Quorum sets should +# be carefully selected manually. +NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +FAILURE_SAFETY=1 +HTTP_PORT=11626 +PEER_PORT=11725 + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.org" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="satoshipay.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="lobstr.co" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="www.coinqvest.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="publicnode.org" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.blockdaemon.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN = "www.franklintempleton.com" +QUALITY = "HIGH" + +[[VALIDATORS]] +NAME="sdf_1" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH" +ADDRESS="core-live-a.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_2" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK" +ADDRESS="core-live-b.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_3" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ" +ADDRESS="core-live-c.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_singapore" +HOME_DOMAIN="satoshipay.io" 
+PUBLIC_KEY="GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT" +ADDRESS="stellar-sg-sin.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_iowa" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY" +ADDRESS="stellar-us-iowa.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_frankfurt" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE" +ADDRESS="stellar-de-fra.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_1_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GCFONE23AB7Y6C5YZOMKUKGETPIAJA4QOYLS5VNS4JHBGKRZCPYHDLW7" +ADDRESS="v1.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-1-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_2_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GDXQB3OMMQ6MGG43PWFBZWBFKBBDUZIVSUDAZZTRAWQZKES2CDSE5HKJ" +ADDRESS="v2.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-2-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_3_north_america" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GD5QWEVV4GZZTQP46BRXV5CUMMMLP4JTGFD7FWYJJWRL54CELY6JGQ63" +ADDRESS="v3.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-3-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_4_asia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA7TEPCBDQKI7JQLQ34ZURRMK44DVYCIGVXQQWNSWAEQR6KB4FMCBT7J" +ADDRESS="v4.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-4-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_5_australia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA5STBMV6QDXFDGD62MEHLLHZTPDI77U3PFOD2SELU5RJDHQWBR5NNK7" +ADDRESS="v5.stellar.lobstr.co:11625" +HISTORY="curl -sf 
https://stellar-archive-5-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_hong_kong" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GAZ437J46SCFPZEDLVGDMKZPLFO77XJ4QVAURSJVRZK2T5S7XUFHXI2Z" +ADDRESS="hongkong.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://hongkong.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_germany" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GD6SZQV3WEJUH352NTVLKEV2JM2RH266VPEM7EH5QLLI7ZZAALMLNUVN" +ADDRESS="germany.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://germany.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_finland" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GADLA6BJK6VK33EM2IDQM37L5KGVCY5MSHSHVJA4SCNGNUIEOTCR6J5T" +ADDRESS="finland.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://finland.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="bootes" +HOME_DOMAIN="publicnode.org" +PUBLIC_KEY="GCVJ4Z6TI6Z2SOGENSPXDQ2U4RKH3CNQKYUHNSSPYFPNWTLGS6EBH7I2" +ADDRESS="bootes.publicnode.org" +HISTORY="curl -sf https://bootes-history.publicnode.org/{0} -o {1}" + +[[VALIDATORS]] +NAME="hercules" +HOME_DOMAIN="publicnode.org" +PUBLIC_KEY="GBLJNN3AVZZPG2FYAYTYQKECNWTQYYUUY2KVFN2OUKZKBULXIXBZ4FCT" +ADDRESS="hercules.publicnode.org" +HISTORY="curl -sf https://hercules-history.publicnode.org/{0} -o {1}" + +[[VALIDATORS]] +NAME="lyra" +HOME_DOMAIN="publicnode.org" +PUBLIC_KEY="GCIXVKNFPKWVMKJKVK2V4NK7D4TC6W3BUMXSIJ365QUAXWBRPPJXIR2Z" +ADDRESS="lyra.publicnode.org" +HISTORY="curl -sf https://lyra-history.publicnode.org/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_1" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAAV2GCVFLNN522ORUYFV33E76VPC22E72S75AQ6MBR5V45Z5DWVPWEU" +ADDRESS="stellar-full-validator1.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history1.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_2" +HOME_DOMAIN="stellar.blockdaemon.com" 
+PUBLIC_KEY="GAVXB7SBJRYHSG6KSQHY74N7JAFRL4PFVZCNWW2ARI6ZEKNBJSMSKW7C" +ADDRESS="stellar-full-validator2.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history2.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_3" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAYXZ4PZ7P6QOX7EBHPIZXNWY4KCOBYWJCA4WKWRKC7XIUS3UJPT6EZ4" +ADDRESS="stellar-full-validator3.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history3.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME = "FT_SCV_1" +HOME_DOMAIN = "www.franklintempleton.com" +PUBLIC_KEY = "GARYGQ5F2IJEBCZJCBNPWNWVDOFK7IBOHLJKKSG2TMHDQKEEC6P4PE4V" +ADDRESS = "stellar1.franklintempleton.com:11625" +HISTORY = "curl -sf https://stellar-history-usw.franklintempleton.com/azuswshf401/{0} -o {1}" + +[[VALIDATORS]] +NAME = "FT_SCV_2" +HOME_DOMAIN = "www.franklintempleton.com" +PUBLIC_KEY = "GCMSM2VFZGRPTZKPH5OABHGH4F3AVS6XTNJXDGCZ3MKCOSUBH3FL6DOB" +ADDRESS = "stellar2.franklintempleton.com:11625" +HISTORY = "curl -sf https://stellar-history-usc.franklintempleton.com/azuscshf401/{0} -o {1}" + +[[VALIDATORS]] +NAME = "FT_SCV_3" +HOME_DOMAIN = "www.franklintempleton.com" +PUBLIC_KEY = "GA7DV63PBUUWNUFAF4GAZVXU2OZMYRATDLKTC7VTCG7AU4XUPN5VRX4A" +ADDRESS = "stellar3.franklintempleton.com:11625" +HISTORY = "curl -sf https://stellar-history-ins.franklintempleton.com/azinsshf401/{0} -o {1}" diff --git a/ingest/ledgerbackend/configs/captive-core-testnet.cfg b/ingest/ledgerbackend/configs/captive-core-testnet.cfg new file mode 100644 index 0000000000..9abeecc8f5 --- /dev/null +++ b/ingest/ledgerbackend/configs/captive-core-testnet.cfg @@ -0,0 +1,28 @@ +NETWORK_PASSPHRASE="Test SDF Network ; September 2015" +UNSAFE_QUORUM=true +FAILURE_SAFETY=1 + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf 
http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" \ No newline at end of file diff --git a/ingest/ledgerbackend/toml.go b/ingest/ledgerbackend/toml.go index e2234fc1f2..bc3ab2247a 100644 --- a/ingest/ledgerbackend/toml.go +++ b/ingest/ledgerbackend/toml.go @@ -2,6 +2,7 @@ package ledgerbackend import ( "bytes" + _ "embed" "fmt" "os" "os/exec" @@ -16,6 +17,14 @@ import ( "github.com/pelletier/go-toml" ) +var ( + //go:embed configs/captive-core-pubnet.cfg + PubnetDefaultConfig []byte + + //go:embed configs/captive-core-testnet.cfg + TestnetDefaultConfig []byte +) + const ( defaultHTTPPort = 11626 defaultFailureSafety = -1 diff --git a/xdr/Stellar-exporter.x b/xdr/Stellar-exporter.x new file mode 100644 index 0000000000..4ac92654b1 --- /dev/null +++ b/xdr/Stellar-exporter.x @@ -0,0 +1,23 @@ +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +%#include "xdr/Stellar-ledger.h" + +namespace stellar +{ + +// Batch of ledgers along with their transaction metadata +struct LedgerCloseMetaBatch +{ + // starting ledger sequence number in the batch + uint32 startSequence; + + // ending ledger sequence number in the batch + uint32 endSequence; + + // Ledger close meta for each ledger within the batch + LedgerCloseMeta ledgerCloseMetas<>; +}; + +} diff --git a/xdr/xdr_generated.go b/xdr/xdr_generated.go index ac19618f61..ad832d79b4 100644 --- a/xdr/xdr_generated.go +++ b/xdr/xdr_generated.go @@ -9,6 +9,7 @@ // xdr/Stellar-contract-meta.x // xdr/Stellar-contract-spec.x // xdr/Stellar-contract.x +// xdr/Stellar-exporter.x // xdr/Stellar-internal.x // xdr/Stellar-ledger-entries.x // xdr/Stellar-ledger.x @@ -38,6 +39,7 @@ var XdrFilesSHA256 = map[string]string{ "xdr/Stellar-contract-meta.x": "f01532c11ca044e19d9f9f16fe373e9af64835da473be556b9a807ee3319ae0d", "xdr/Stellar-contract-spec.x": "c7ffa21d2e91afb8e666b33524d307955426ff553a486d670c29217ed9888d49", "xdr/Stellar-contract.x": "7f665e4103e146a88fcdabce879aaaacd3bf9283feb194cc47ff986264c1e315", + "xdr/Stellar-exporter.x": "a00c83d02e8c8382e06f79a191f1fb5abd097a4bbcab8481c67467e3270e0529", "xdr/Stellar-internal.x": "227835866c1b2122d1eaf28839ba85ea7289d1cb681dda4ca619c2da3d71fe00", "xdr/Stellar-ledger-entries.x": "4f8f2324f567a40065f54f696ea1428740f043ea4154f5986d9f499ad00ac333", "xdr/Stellar-ledger.x": "2c842f3fe6e269498af5467f849cf6818554e90babc845f34c87cda471298d0f", @@ -57036,4 +57038,114 @@ func (s SerializedLedgerCloseMeta) xdrType() {} var _ xdrType = (*SerializedLedgerCloseMeta)(nil) +// LedgerCloseMetaBatch is an XDR Struct defines as: +// +// struct LedgerCloseMetaBatch +// { +// // starting ledger sequence number in the batch +// uint32 startSequence; +// +// // ending ledger sequence number in the batch +// uint32 endSequence; +// +// // Ledger 
close meta for each ledger within the batch +// LedgerCloseMeta ledgerCloseMetas<>; +// }; +type LedgerCloseMetaBatch struct { + StartSequence Uint32 + EndSequence Uint32 + LedgerCloseMetas []LedgerCloseMeta +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerCloseMetaBatch) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.StartSequence.EncodeTo(e); err != nil { + return err + } + if err = s.EndSequence.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.LedgerCloseMetas))); err != nil { + return err + } + for i := 0; i < len(s.LedgerCloseMetas); i++ { + if err = s.LedgerCloseMetas[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*LedgerCloseMetaBatch)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerCloseMetaBatch) DecodeFrom(d *xdr.Decoder, maxDepth uint) (int, error) { + if maxDepth == 0 { + return 0, fmt.Errorf("decoding LedgerCloseMetaBatch: %w", ErrMaxDecodingDepthReached) + } + maxDepth -= 1 + var err error + var n, nTmp int + nTmp, err = s.StartSequence.DecodeFrom(d, maxDepth) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %w", err) + } + nTmp, err = s.EndSequence.DecodeFrom(d, maxDepth) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %w", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerCloseMeta: %w", err) + } + s.LedgerCloseMetas = nil + if l > 0 { + if il, ok := d.InputLen(); ok && uint(il) < uint(l) { + return n, fmt.Errorf("decoding LedgerCloseMeta: length (%d) exceeds remaining input length (%d)", l, il) + } + s.LedgerCloseMetas = make([]LedgerCloseMeta, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.LedgerCloseMetas[i].DecodeFrom(d, maxDepth) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerCloseMeta: %w", err) + } + } + } + return n, nil +} + +// MarshalBinary implements 
encoding.BinaryMarshaler. +func (s LedgerCloseMetaBatch) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerCloseMetaBatch) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + o := xdr.DefaultDecodeOptions + o.MaxInputLen = len(inp) + d := xdr.NewDecoderWithOptions(r, o) + _, err := s.DecodeFrom(d, o.MaxDepth) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerCloseMetaBatch)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerCloseMetaBatch)(nil) +) + +// xdrType signals that this type represents XDR values defined by this package. +func (s LedgerCloseMetaBatch) xdrType() {} + +var _ xdrType = (*LedgerCloseMetaBatch)(nil) + var fmtTest = fmt.Sprint("this is a dummy usage of fmt")