diff --git a/go.mod b/go.mod
index 0e55add07c..0e8d89dac2 100644
--- a/go.mod
+++ b/go.mod
@@ -56,7 +56,10 @@ require (
gopkg.in/tylerb/graceful.v1 v1.2.15
)
-require golang.org/x/sync v0.6.0
+require (
+ github.com/cenkalti/backoff/v4 v4.2.1
+ golang.org/x/sync v0.6.0
+)
require (
cloud.google.com/go/compute v1.23.3 // indirect
diff --git a/go.sum b/go.sum
index 1e31cfdb08..ab7ad33230 100644
--- a/go.sum
+++ b/go.sum
@@ -82,6 +82,8 @@ github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENU
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/buger/goreplay v1.3.2 h1:MFAStZZCsHMPeN5xJ11rhUtV4ZctFRgzSHTfWSWOJsg=
github.com/buger/goreplay v1.3.2/go.mod h1:EyAKHxJR6K6phd0NaoPETSDbJRB/ogIw3Y15UlSbVBM=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d h1:S2NE3iHSwP0XV47EEXL8mWmRdEfGscSJ+7EgePNgt0s=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
diff --git a/historyarchive/archive_pool.go b/historyarchive/archive_pool.go
index 4cb5483f63..259f8ff48a 100644
--- a/historyarchive/archive_pool.go
+++ b/historyarchive/archive_pool.go
@@ -5,15 +5,24 @@
package historyarchive
import (
+ "context"
"math/rand"
+ "time"
- "github.com/stellar/go/support/errors"
+ "github.com/pkg/errors"
+ log "github.com/stellar/go/support/log"
"github.com/stellar/go/xdr"
+
+ backoff "github.com/cenkalti/backoff/v4"
)
-// A ArchivePool is just a collection of `ArchiveInterface`s so that we can
+// An ArchivePool is just a collection of `ArchiveInterface`s so that we can
// distribute requests fairly throughout the pool.
-type ArchivePool []ArchiveInterface
+type ArchivePool struct {
+ backoff backoff.BackOff // retry strategy shared by all pool operations
+ pool []ArchiveInterface // archives that connected successfully
+ curr int // index of the most recently used archive
+}
// NewArchivePool tries connecting to each of the provided history archive URLs,
// returning a pool of valid archives.
@@ -21,39 +30,47 @@ type ArchivePool []ArchiveInterface
// If none of the archives work, this returns the error message of the last
// failed archive. Note that the errors for each individual archive are hard to
// track if there's success overall.
-func NewArchivePool(archiveURLs []string, opts ArchiveOptions) (ArchivePool, error) {
+func NewArchivePool(archiveURLs []string, opts ArchiveOptions) (ArchiveInterface, error) {
+ return NewArchivePoolWithBackoff(
+ archiveURLs,
+ opts,
+ backoff.WithMaxRetries(backoff.NewConstantBackOff(250*time.Millisecond), 3),
+ )
+}
+
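+// NewArchivePoolWithBackoff is like NewArchivePool, but uses the given retry
+// strategy when requests to an individual archive fail (see runRoundRobin).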
+func NewArchivePoolWithBackoff(archiveURLs []string, opts ArchiveOptions, strategy backoff.BackOff) (ArchiveInterface, error) {
if len(archiveURLs) <= 0 {
return nil, errors.New("No history archives provided")
}
- var lastErr error = nil
+ ap := ArchivePool{
+ pool: make([]ArchiveInterface, 0, len(archiveURLs)),
+ backoff: strategy,
+ }
+ var lastErr error
// Try connecting to all of the listed archives, but only store valid ones.
- var validArchives ArchivePool
for _, url := range archiveURLs {
- archive, err := Connect(
- url,
- opts,
- )
-
+ archive, err := Connect(url, opts)
if err != nil {
lastErr = errors.Wrapf(err, "Error connecting to history archive (%s)", url)
continue
}
- validArchives = append(validArchives, archive)
+ ap.pool = append(ap.pool, archive)
}
- if len(validArchives) == 0 {
+ if len(ap.pool) == 0 {
return nil, lastErr
}
- return validArchives, nil
+ ap.curr = rand.Intn(len(ap.pool)) // don't necessarily start at zero
+ return &ap, nil
}
-func (pa ArchivePool) GetStats() []ArchiveStats {
+func (pa *ArchivePool) GetStats() []ArchiveStats {
stats := []ArchiveStats{}
- for _, archive := range pa {
+ for _, archive := range pa.pool {
stats = append(stats, archive.GetStats()...)
}
return stats
@@ -62,80 +79,178 @@ func (pa ArchivePool) GetStats() []ArchiveStats {
// Ensure the pool conforms to the ArchiveInterface
var _ ArchiveInterface = &ArchivePool{}
-// Below are the ArchiveInterface method implementations.
+//
+// These are helpers to round-robin calls through archives.
+//
-func (pa ArchivePool) GetAnyArchive() ArchiveInterface {
- return pa[rand.Intn(len(pa))]
+// getNextArchive statefully round-robins through the pool.
+func (pa *ArchivePool) getNextArchive() ArchiveInterface {
+ // Round-robin through the archives
+ pa.curr = (pa.curr + 1) % len(pa.pool)
+ return pa.pool[pa.curr]
}
-func (pa ArchivePool) GetPathHAS(path string) (HistoryArchiveState, error) {
- return pa.GetAnyArchive().GetPathHAS(path)
+// runRoundRobin is a helper method that runs a particular action against the
+// pool, rotating to the next archive on each attempt and retrying failures
+// according to the pool's backoff strategy (the default from NewArchivePool
+// is up to 3 retries with a constant 250ms delay).
+func (pa *ArchivePool) runRoundRobin(runner func(ai ArchiveInterface) error) error {
+ return backoff.Retry(func() error {
+ var err error
+ ai := pa.getNextArchive()
+ if err = runner(ai); err == nil {
+ return nil
+ }
+
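+ // Context cancellation and expiry are not retryable: wrapping the
+ // error in backoff.Permanent stops the retry loop immediately.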
+ if errors.Is(err, context.Canceled) ||
+ errors.Is(err, context.DeadlineExceeded) {
+ return backoff.Permanent(err)
+ }
+
+ // Log the failure. Context errors were returned above precisely so
+ // that cancellations never reach this logging path.
+ if stats := ai.GetStats(); len(stats) > 0 {
+ log.WithField("error", err).Warnf(
+ "Encountered an error with archive '%s'",
+ stats[0].GetBackendName())
+ }
+
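+ // A plain (non-permanent) error tells backoff.Retry to try again;
+ // the next attempt will hit the next archive via getNextArchive.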
+ return err
+ }, pa.backoff)
+}
+
+//
+// Below are the ArchiveInterface method implementations.
+//
+
+func (pa *ArchivePool) GetPathHAS(path string) (HistoryArchiveState, error) {
+ has := HistoryArchiveState{}
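+ // The closure writes into `has`, so the value from the last attempt is
+ // returned alongside runRoundRobin's error.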
+ err := pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var innerErr error
+ has, innerErr = ai.GetPathHAS(path)
+ return innerErr
+ })
+ return has, err
}
-func (pa ArchivePool) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error {
- return pa.GetAnyArchive().PutPathHAS(path, has, opts)
+func (pa *ArchivePool) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error {
+ return pa.runRoundRobin(func(ai ArchiveInterface) error {
+ return ai.PutPathHAS(path, has, opts)
+ })
}
-func (pa ArchivePool) BucketExists(bucket Hash) (bool, error) {
- return pa.GetAnyArchive().BucketExists(bucket)
+func (pa *ArchivePool) BucketExists(bucket Hash) (bool, error) {
+ status := false
+ return status, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ status, err = ai.BucketExists(bucket)
+ return err
+ })
}
-func (pa ArchivePool) BucketSize(bucket Hash) (int64, error) {
- return pa.GetAnyArchive().BucketSize(bucket)
+func (pa *ArchivePool) BucketSize(bucket Hash) (int64, error) {
+ var bsize int64
+ return bsize, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ bsize, err = ai.BucketSize(bucket)
+ return err
+ })
}
-func (pa ArchivePool) CategoryCheckpointExists(cat string, chk uint32) (bool, error) {
- return pa.GetAnyArchive().CategoryCheckpointExists(cat, chk)
+func (pa *ArchivePool) CategoryCheckpointExists(cat string, chk uint32) (bool, error) {
+ var ok bool
+ return ok, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ ok, err = ai.CategoryCheckpointExists(cat, chk)
+ return err
+ })
}
-func (pa ArchivePool) GetLedgerHeader(chk uint32) (xdr.LedgerHeaderHistoryEntry, error) {
- return pa.GetAnyArchive().GetLedgerHeader(chk)
+func (pa *ArchivePool) GetLedgerHeader(chk uint32) (xdr.LedgerHeaderHistoryEntry, error) {
+ var entry xdr.LedgerHeaderHistoryEntry
+ return entry, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ entry, err = ai.GetLedgerHeader(chk)
+ return err
+ })
}
-func (pa ArchivePool) GetRootHAS() (HistoryArchiveState, error) {
- return pa.GetAnyArchive().GetRootHAS()
+func (pa *ArchivePool) GetRootHAS() (HistoryArchiveState, error) {
+ var state HistoryArchiveState
+ return state, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ state, err = ai.GetRootHAS()
+ return err
+ })
}
-func (pa ArchivePool) GetLedgers(start, end uint32) (map[uint32]*Ledger, error) {
- return pa.GetAnyArchive().GetLedgers(start, end)
+func (pa *ArchivePool) GetLedgers(start, end uint32) (map[uint32]*Ledger, error) {
+ var dict map[uint32]*Ledger
+
+ return dict, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ dict, err = ai.GetLedgers(start, end)
+ return err
+ })
}
-func (pa ArchivePool) GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) {
- return pa.GetAnyArchive().GetCheckpointHAS(chk)
+func (pa *ArchivePool) GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) {
+ var state HistoryArchiveState
+ return state, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ state, err = ai.GetCheckpointHAS(chk)
+ return err
+ })
}
-func (pa ArchivePool) PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error {
- return pa.GetAnyArchive().PutCheckpointHAS(chk, has, opts)
+func (pa *ArchivePool) PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error {
+ return pa.runRoundRobin(func(ai ArchiveInterface) error {
+ return ai.PutCheckpointHAS(chk, has, opts)
+ })
}
-func (pa ArchivePool) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error {
- return pa.GetAnyArchive().PutRootHAS(has, opts)
+func (pa *ArchivePool) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error {
+ return pa.runRoundRobin(func(ai ArchiveInterface) error {
+ return ai.PutRootHAS(has, opts)
+ })
}
-func (pa ArchivePool) ListBucket(dp DirPrefix) (chan string, chan error) {
- return pa.GetAnyArchive().ListBucket(dp)
+func (pa *ArchivePool) GetXdrStreamForHash(hash Hash) (*XdrStream, error) {
+ var stream *XdrStream
+ return stream, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ stream, err = ai.GetXdrStreamForHash(hash)
+ return err
+ })
}
-func (pa ArchivePool) ListAllBuckets() (chan string, chan error) {
- return pa.GetAnyArchive().ListAllBuckets()
+func (pa *ArchivePool) GetXdrStream(pth string) (*XdrStream, error) {
+ var stream *XdrStream
+ return stream, pa.runRoundRobin(func(ai ArchiveInterface) error {
+ var err error
+ stream, err = ai.GetXdrStream(pth)
+ return err
+ })
}
-func (pa ArchivePool) ListAllBucketHashes() (chan Hash, chan error) {
- return pa.GetAnyArchive().ListAllBucketHashes()
+func (pa *ArchivePool) GetCheckpointManager() CheckpointManager {
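+ // Checkpoint cadence is a property of the network rather than of any
+ // single archive, so any pool member's manager will do.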
+ return pa.getNextArchive().GetCheckpointManager()
}
-func (pa ArchivePool) ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) {
- return pa.GetAnyArchive().ListCategoryCheckpoints(cat, pth)
+//
+// The channel-based methods below do not have automatic retries: their
+// results stream incrementally, so a failed stream cannot be transparently
+// replayed mid-flight.
+//
+
+func (pa *ArchivePool) ListBucket(dp DirPrefix) (chan string, chan error) {
+ return pa.getNextArchive().ListBucket(dp)
}
-func (pa ArchivePool) GetXdrStreamForHash(hash Hash) (*XdrStream, error) {
- return pa.GetAnyArchive().GetXdrStreamForHash(hash)
+func (pa *ArchivePool) ListAllBuckets() (chan string, chan error) {
+ return pa.getNextArchive().ListAllBuckets()
}
-func (pa ArchivePool) GetXdrStream(pth string) (*XdrStream, error) {
- return pa.GetAnyArchive().GetXdrStream(pth)
+func (pa *ArchivePool) ListAllBucketHashes() (chan Hash, chan error) {
+ return pa.getNextArchive().ListAllBucketHashes()
}
-func (pa ArchivePool) GetCheckpointManager() CheckpointManager {
- return pa.GetAnyArchive().GetCheckpointManager()
+func (pa *ArchivePool) ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) {
+ return pa.getNextArchive().ListCategoryCheckpoints(cat, pth)
}
diff --git a/historyarchive/archive_pool_test.go b/historyarchive/archive_pool_test.go
index 9f51fd75e3..2562b3ae13 100644
--- a/historyarchive/archive_pool_test.go
+++ b/historyarchive/archive_pool_test.go
@@ -5,12 +5,16 @@
package historyarchive
import (
+ "fmt"
"net/http"
"net/http/httptest"
+ "strings"
"testing"
+ "time"
"github.com/stellar/go/support/storage"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestConfiguresHttpUserAgentForArchivePool(t *testing.T) {
@@ -30,10 +34,77 @@ func TestConfiguresHttpUserAgentForArchivePool(t *testing.T) {
}
archivePool, err := NewArchivePool(archiveURLs, archiveOptions)
- assert.NoError(t, err)
+ require.NoError(t, err)
ok, err := archivePool.BucketExists(EmptyXdrArrayHash())
- assert.True(t, ok)
- assert.NoError(t, err)
- assert.Equal(t, userAgent, "uatest")
+ require.True(t, ok)
+ require.NoError(t, err)
+ require.Equal(t, userAgent, "uatest")
+}
+
+func TestArchivePoolRoundRobin(t *testing.T) {
+ accesses := []string{}
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
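+ // Requests arrive as /fake-archive/<n>/..., so parts[2] records which
+ // pool member served each request.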
+ parts := strings.Split(r.URL.Path, "/")
+ accesses = append(accesses, parts[2])
+ w.Write([]byte("boo"))
+ }))
+
+ pool, err := NewArchivePool([]string{
+ fmt.Sprintf("%s/%s/%s", server.URL, "fake-archive", "1"),
+ fmt.Sprintf("%s/%s/%s", server.URL, "fake-archive", "2"),
+ fmt.Sprintf("%s/%s/%s", server.URL, "fake-archive", "3"),
+ }, ArchiveOptions{})
+ require.NoError(t, err)
+
+ _, err = pool.BucketExists(EmptyXdrArrayHash())
+ require.NoError(t, err)
+ _, err = pool.BucketExists(EmptyXdrArrayHash())
+ require.NoError(t, err)
+ _, err = pool.BucketExists(EmptyXdrArrayHash())
+ require.NoError(t, err)
+ _, err = pool.BucketExists(EmptyXdrArrayHash())
+ require.NoError(t, err)
+
+ assert.Contains(t, accesses, "1")
+ assert.Contains(t, accesses, "2")
+ assert.Contains(t, accesses, "3")
+ assert.Len(t, accesses, 4)
+}
+
+func TestArchivePoolCycles(t *testing.T) {
+ accesses := []string{}
+ requestTimes := []time.Time{}
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ parts := strings.Split(r.URL.Path, "/")
+ accesses = append(accesses, parts[2])
+ requestTimes = append(requestTimes, time.Now())
+ w.Write([]byte("failure"))
+ }))
+
+ pool, err := NewArchivePool([]string{
+ fmt.Sprintf("%s/%s/%s", server.URL, "fake-archive", "1"),
+ fmt.Sprintf("%s/%s/%s", server.URL, "fake-archive", "2"),
+ fmt.Sprintf("%s/%s/%s", server.URL, "fake-archive", "3"),
+ }, ArchiveOptions{})
+ require.NoError(t, err)
+
+ //
+ // A single call makes one initial attempt plus three retries with a
+ // constant 250ms back-off, so we expect four accesses (cycling through
+ // all three archives) and at least ~750ms between first and last request.
+ //
+ _, err = pool.GetPathHAS("path")
+ require.Error(t, err)
+
+ require.Len(t, accesses, 4)
+ assert.Contains(t, accesses, "1")
+ assert.Contains(t, accesses, "2")
+ assert.Contains(t, accesses, "3")
+
+ assert.GreaterOrEqualf(t,
+ requestTimes[len(requestTimes)-1].Sub(requestTimes[0]),
+ 740*time.Millisecond, // some leeway
+ "")
}
diff --git a/ingest/checkpoint_change_reader.go b/ingest/checkpoint_change_reader.go
index 37e5e994e1..d1365740c6 100644
--- a/ingest/checkpoint_change_reader.go
+++ b/ingest/checkpoint_change_reader.go
@@ -71,8 +71,6 @@ const (
// bucket in a single run. This is done to allow preloading keys from
// temp set.
preloadedEntries = 20000
-
- sleepDuration = time.Second
)
// NewCheckpointChangeReader constructs a new CheckpointChangeReader instance.
@@ -126,21 +124,7 @@ func NewCheckpointChangeReader(
}
func (r *CheckpointChangeReader) bucketExists(hash historyarchive.Hash) (bool, error) {
- duration := sleepDuration
- var exists bool
- var err error
- for attempts := 0; ; attempts++ {
- exists, err = r.archive.BucketExists(hash)
- if err == nil {
- return exists, nil
- }
- if attempts >= maxStreamRetries {
- break
- }
- r.sleep(duration)
- duration *= 2
- }
- return exists, err
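+ // Retries with backoff now happen inside the history archive pool itself
+ // (see historyarchive.ArchivePool.runRoundRobin), so a bare call suffices.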
+ return r.archive.BucketExists(hash)
}
// streamBuckets is internal method that streams buckets from the given HAS.
diff --git a/ingest/checkpoint_change_reader_test.go b/ingest/checkpoint_change_reader_test.go
index 08730ddd0f..958f3199ac 100644
--- a/ingest/checkpoint_change_reader_test.go
+++ b/ingest/checkpoint_change_reader_test.go
@@ -311,47 +311,15 @@ func (s *BucketExistsTestSuite) TearDownTest() {
s.mockArchive.AssertExpectations(s.T())
}
-func (s *BucketExistsTestSuite) testBucketExists(
- numErrors int, expectedSleeps []time.Duration,
-) {
+func (s *BucketExistsTestSuite) TestBucketExists() {
for _, expected := range []bool{true, false} {
hash := historyarchive.Hash{1, 2, 3}
- if numErrors > 0 {
- s.mockArchive.On("BucketExists", hash).
- Return(true, errors.New("transient error")).Times(numErrors)
- }
s.mockArchive.On("BucketExists", hash).
Return(expected, nil).Once()
- s.expectedSleeps = expectedSleeps
exists, err := s.reader.bucketExists(hash)
s.Assert().Equal(expected, exists)
s.Assert().NoError(err)
- s.Assert().Empty(s.expectedSleeps)
- }
-}
-
-func (s *BucketExistsTestSuite) TestSucceedsFirstTime() {
- s.testBucketExists(0, []time.Duration{})
-}
-
-func (s *BucketExistsTestSuite) TestSucceedsSecondTime() {
- s.testBucketExists(1, []time.Duration{time.Second})
-}
-
-func (s *BucketExistsTestSuite) TestSucceedsThirdime() {
- s.testBucketExists(2, []time.Duration{time.Second, 2 * time.Second})
-}
-
-func (s *BucketExistsTestSuite) TestFailsAfterThirdTime() {
- hash := historyarchive.Hash{1, 2, 3}
- s.mockArchive.On("BucketExists", hash).
- Return(true, errors.New("transient error")).Times(4)
- s.expectedSleeps = []time.Duration{
- time.Second, 2 * time.Second, 4 * time.Second,
}
- _, err := s.reader.bucketExists(hash)
- s.Assert().EqualError(err, "transient error")
- s.Assert().Empty(s.expectedSleeps)
}
func TestReadBucketEntryTestSuite(t *testing.T) {
diff --git a/ingest/ledgerbackend/captive_core_backend.go b/ingest/ledgerbackend/captive_core_backend.go
index bc29acb54b..50e933bb6a 100644
--- a/ingest/ledgerbackend/captive_core_backend.go
+++ b/ingest/ledgerbackend/captive_core_backend.go
@@ -191,7 +191,7 @@ func NewCaptive(config CaptiveCoreConfig) (*CaptiveStellarCore, error) {
}
c := &CaptiveStellarCore{
- archive: &archivePool,
+ archive: archivePool, // NewArchivePool now returns an ArchiveInterface directly
ledgerHashStore: config.LedgerHashStore,
useDB: config.UseDB,
cancel: cancel,
diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md
index 6bb186b393..7bf15774f1 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -3,19 +3,23 @@
All notable changes to this project will be documented in this
file. This project adheres to [Semantic Versioning](http://semver.org/).
-## unreleased
+## Unreleased
### Added
-- New `db_error_total` metrics key with labels `ctx_error`, `db_error`, and `db_error_extra` ([5225](https://github.com/stellar/go/pull/5225)).
+- New `db_error_total` metrics key with labels `ctx_error`, `db_error`, and `db_error_extra` ([5225](https://github.com/stellar/go/pull/5225)).
+
+### Fixed
+- History archive access is more effective when you pass a list of URLs to Horizon: archives are now accessed in a round-robin fashion, alternative archives are tried on errors, and requests back off intelligently ([5224](https://github.com/stellar/go/pull/5224))
+
## 2.28.3
### Fixed
-- Fix claimable_balance_claimants subquery in GetClaimableBalances() [5207](https://github.com/stellar/go/pull/5207)
+- Fix claimable_balance_claimants subquery in GetClaimableBalances() ([5207](https://github.com/stellar/go/pull/5207))
### Added
- New optional config `SKIP_TXMETA` ([5189](https://github.com/stellar/go/issues/5189)). Defaults to `FALSE`, when `TRUE` the following will occur:
- * history_transactions.tx_meta column will have serialized xdr that equates to empty for any protocol version, such as for `xdr.TransactionMeta.V3`, `Operations`, `TxChangesAfter`, `TxChangesBefore` will be empty arrays and `SorobanMeta` will be nil.
+ * history_transactions.tx_meta column will have serialized xdr that equates to empty for any protocol version, such as for `xdr.TransactionMeta.V3`, `Operations`, `TxChangesAfter`, `TxChangesBefore` will be empty arrays and `SorobanMeta` will be nil.
### Breaking Changes
- Removed `DISABLE_SOROBAN_INGEST` configuration parameter, use the new `SKIP_TXMETA` parameter instead.
@@ -23,15 +27,14 @@ file. This project adheres to [Semantic Versioning](http://semver.org/).
## 2.28.2
### Fixed
-- History archive caching would cause file corruption in certain environments [5197](https://github.com/stellar/go/pull/5197)
-- Server error in claimable balance API when claimant, asset and cursor query params are supplied [5200](https://github.com/stellar/go/pull/5200)
+- History archive caching would cause file corruption in certain environments ([5197](https://github.com/stellar/go/pull/5197))
+- Server error in claimable balance API when claimant, asset and cursor query params are supplied ([5200](https://github.com/stellar/go/pull/5200))
## 2.28.1
### Fixed
- Submitting transaction with a future gapped sequence number greater than 1 past current source account sequence, may result in delayed 60s timeout response, rather than expected HTTP 400 error response with `result_codes: {transaction: "tx_bad_seq"}` ([5191](https://github.com/stellar/go/pull/5191))
-
## 2.28.0
### Fixed
@@ -51,21 +54,21 @@ file. This project adheres to [Semantic Versioning](http://semver.org/).
* API `Operation` model for `InvokeHostFunctionOp` type, will have empty `asset_balance_changes`
### Breaking Changes
-- Deprecation of legacy, non-captive core ingestion([5158](https://github.com/stellar/go/pull/5158)):
+- Deprecation of legacy, non-captive core ingestion ([5158](https://github.com/stellar/go/pull/5158)):
* removed configuration flags `--stellar-core-url-db`, `--cursor-name` `--skip-cursor-update`, they are no longer usable.
* removed automatic updating of core cursor from ingestion background processing.
- **Note** for upgrading on existing horizon deployments - Since horizon will no longer maintain advancement of this cursor on core, it may require manual removal of the cursor from the core process that your horizon was using for captive core, otherwise that core process may un-necessarily retain older data in buckets on disk up to the last cursor ledger sequence set by prior horizon release.
-
+ **Note** for upgrading on existing horizon deployments - Since horizon will no longer maintain advancement of this cursor on core, it may require manual removal of the cursor from the core process that your horizon was using for captive core, otherwise that core process may un-necessarily retain older data in buckets on disk up to the last cursor ledger sequence set by prior horizon release.
+
The captive core process to check and verify presence of cursor usage is determined by the horizon deployment, if `NETWORK` is present, or `STELLAR_CORE_URL` is present or `CAPTIVE-CORE-HTTP-PORT` is present and set to non-zero value, or `CAPTIVE-CORE_CONFIG_PATH` is used and the toml has `HTTP_PORT` set to non-zero and `PUBLIC_HTTP_PORT` is not set to false, then it is recommended to perform the following preventative measure on the machine hosting horizon after upgraded to 2.28.0 and process restarted:
```
$ curl http:///getcursor
# If there are no cursors reported, done, no need for any action
- # If any horizon cursors exist they need to be dropped by id.
- # By default horizon sets cursor id to "HORIZON" but if it was customized
+ # If any horizon cursors exist they need to be dropped by id.
+ # By default horizon sets cursor id to "HORIZON" but if it was customized
# using the --cursor-name flag the id might be different
$ curl http:///dropcursor?id=
- ```
-
+ ```
+
## 2.27.0