diff --git a/Makefile b/Makefile
index 07037315e4..69d46f68c3 100644
--- a/Makefile
+++ b/Makefile
@@ -14,8 +14,7 @@
 xdr/Stellar-contract.x \
 xdr/Stellar-internal.x \
 xdr/Stellar-contract-config-setting.x
-XDRS = $(DOWNLOADABLE_XDRS) xdr/Stellar-lighthorizon.x \
-    xdr/Stellar-exporter.x
+XDRS = $(DOWNLOADABLE_XDRS) xdr/Stellar-exporter.x
 
 XDRGEN_COMMIT=e2cac557162d99b12ae73b846cf3d5bfe16636de
 
diff --git a/exp/lighthorizon/actions/accounts.go b/exp/lighthorizon/actions/accounts.go
deleted file mode 100644
index 86673afa68..0000000000
--- a/exp/lighthorizon/actions/accounts.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package actions
-
-import (
-    "errors"
-    "net/http"
-    "os"
-    "strconv"
-
-    "github.com/stellar/go/support/log"
-    "github.com/stellar/go/xdr"
-
-    "github.com/stellar/go/exp/lighthorizon/adapters"
-    "github.com/stellar/go/exp/lighthorizon/services"
-    hProtocol "github.com/stellar/go/protocols/horizon"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/render/hal"
-    supportProblem "github.com/stellar/go/support/render/problem"
-    "github.com/stellar/go/toid"
-)
-
-const (
-    urlAccountId = "account_id"
-)
-
-func accountRequestParams(w http.ResponseWriter, r *http.Request) (string, pagination, error) {
-    var accountId string
-    var accountErr bool
-
-    if accountId, accountErr = getURLParam(r, urlAccountId); !accountErr {
-        return "", pagination{}, errors.New("unable to find account_id in url path")
-    }
-
-    paginate, err := paging(r)
-    if err != nil {
-        return "", pagination{}, err
-    }
-
-    if paginate.Cursor < 1 {
-        paginate.Cursor = toid.New(1, 1, 1).ToInt64()
-    }
-
-    if paginate.Limit == 0 {
-        paginate.Limit = 10
-    }
-
-    return accountId, paginate, nil
-}
-
-func NewTXByAccountHandler(lightHorizon services.LightHorizon) func(http.ResponseWriter, *http.Request) {
-    return func(w http.ResponseWriter, r *http.Request) {
-        ctx := r.Context()
-        var accountId string
-        var paginate pagination
-        var err error
-
-        if accountId, paginate, err = accountRequestParams(w, r); err != nil {
-            errorMsg := supportProblem.MakeInvalidFieldProblem("account_id", err)
-            sendErrorResponse(r.Context(), w, *errorMsg)
-            return
-        }
-
-        page := hal.Page{
-            Cursor: strconv.FormatInt(paginate.Cursor, 10),
-            Order:  string(paginate.Order),
-            Limit:  uint64(paginate.Limit),
-        }
-        page.Init()
-        page.FullURL = r.URL
-
-        txns, err := lightHorizon.Transactions.GetTransactionsByAccount(ctx, paginate.Cursor, paginate.Limit, accountId)
-        if err != nil {
-            log.Error(err)
-            if os.IsNotExist(err) {
-                sendErrorResponse(r.Context(), w, supportProblem.NotFound)
-            } else if err != nil {
-                sendErrorResponse(r.Context(), w, supportProblem.ServerError)
-            }
-            return
-        }
-
-        encoder := xdr.NewEncodingBuffer()
-        for _, txn := range txns {
-            var response hProtocol.Transaction
-            response, err = adapters.PopulateTransaction(r.URL, &txn, encoder)
-            if err != nil {
-                log.Error(err)
-                sendErrorResponse(r.Context(), w, supportProblem.ServerError)
-                return
-            }
-
-            page.Add(response)
-        }
-
-        page.PopulateLinks()
-        sendPageResponse(r.Context(), w, page)
-    }
-}
-
-func NewOpsByAccountHandler(lightHorizon services.LightHorizon) func(http.ResponseWriter, *http.Request) {
-    return func(w http.ResponseWriter, r *http.Request) {
-        ctx := r.Context()
-        var accountId string
-        var paginate pagination
-        var err error
-
-        if accountId, paginate, err = accountRequestParams(w, r); err != nil {
-            errorMsg := supportProblem.MakeInvalidFieldProblem("account_id", err)
-            sendErrorResponse(r.Context(), w, *errorMsg)
-            return
-        }
-
-        page := hal.Page{
-            Cursor: strconv.FormatInt(paginate.Cursor, 10),
-            Order:  string(paginate.Order),
-            Limit:  uint64(paginate.Limit),
-        }
-        page.Init()
-        page.FullURL = r.URL
-
-        ops, err := lightHorizon.Operations.GetOperationsByAccount(ctx, paginate.Cursor, paginate.Limit, accountId)
-        if err != nil {
-            log.Error(err)
-            sendErrorResponse(r.Context(), w, supportProblem.ServerError)
-            return
-        }
-
-        for _, op := range ops {
-            var response operations.Operation
-            response, err = adapters.PopulateOperation(r, &op)
-            if err != nil {
-                log.Error(err)
-                sendErrorResponse(r.Context(), w, supportProblem.ServerError)
-                return
-            }
-
-            page.Add(response)
-        }
-
-        page.PopulateLinks()
-        sendPageResponse(r.Context(), w, page)
-    }
-}
diff --git a/exp/lighthorizon/actions/accounts_test.go b/exp/lighthorizon/actions/accounts_test.go
deleted file mode 100644
index 40576fb7e4..0000000000
--- a/exp/lighthorizon/actions/accounts_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package actions
-
-import (
-    "context"
-    "encoding/json"
-    "errors"
-    "io/ioutil"
-    "net/http"
-    "net/http/httptest"
-    "net/url"
-    "testing"
-
-    "github.com/go-chi/chi"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/mock"
-    "github.com/stretchr/testify/require"
-
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/exp/lighthorizon/services"
-    "github.com/stellar/go/support/render/problem"
-)
-
-func setupTest() {
-    problem.RegisterHost("")
-}
-
-func TestTxByAccountMissingParamError(t *testing.T) {
-    setupTest()
-    recorder := httptest.NewRecorder()
-    request := buildHttpRequest(
-        t,
-        map[string]string{},
-        map[string]string{},
-    )
-
-    mockOperationService := &services.MockOperationService{}
-    mockTransactionService := &services.MockTransactionService{}
-
-    lh := services.LightHorizon{
-        Operations:   mockOperationService,
-        Transactions: mockTransactionService,
-    }
-
-    handler := NewTXByAccountHandler(lh)
-    handler(recorder, request)
-
-    resp := recorder.Result()
-    assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
-
-    raw, err := ioutil.ReadAll(resp.Body)
-    assert.NoError(t, err)
-
-    var problem problem.P
-    err = json.Unmarshal(raw, &problem)
-    assert.NoError(t, err)
-    assert.Equal(t, "Bad Request", problem.Title)
-    assert.Equal(t, "bad_request", problem.Type)
-    assert.Equal(t, "account_id", problem.Extras["invalid_field"])
-    assert.Equal(t, "The request you sent was invalid in some way.", problem.Detail)
-    assert.Equal(t, "unable to find account_id in url path", problem.Extras["reason"])
-}
-
-func TestTxByAccountServerError(t *testing.T) {
-    setupTest()
-    recorder := httptest.NewRecorder()
-    pathParams := make(map[string]string)
-    pathParams["account_id"] = "G1234"
-    request := buildHttpRequest(
-        t,
-        map[string]string{},
-        pathParams,
-    )
-
-    mockOperationService := &services.MockOperationService{}
-    mockTransactionService := &services.MockTransactionService{}
-    mockTransactionService.On("GetTransactionsByAccount", mock.Anything, mock.Anything, mock.Anything, "G1234").Return([]common.Transaction{}, errors.New("not good"))
-
-    lh := services.LightHorizon{
-        Operations:   mockOperationService,
-        Transactions: mockTransactionService,
-    }
-
-    handler := NewTXByAccountHandler(lh)
-    handler(recorder, request)
-
-    resp := recorder.Result()
-    assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
-
-    raw, err := ioutil.ReadAll(resp.Body)
-    assert.NoError(t, err)
-
-    var problem problem.P
-    err = json.Unmarshal(raw, &problem)
-    assert.NoError(t, err)
-    assert.Equal(t, "Internal Server Error", problem.Title)
-    assert.Equal(t, "server_error", problem.Type)
-}
-
-func TestOpsByAccountMissingParamError(t *testing.T) {
-    setupTest()
-    recorder := httptest.NewRecorder()
-    request := buildHttpRequest(
-        t,
-        map[string]string{},
-        map[string]string{},
-    )
-
-    mockOperationService := &services.MockOperationService{}
-    mockTransactionService := &services.MockTransactionService{}
-
-    lh := services.LightHorizon{
-        Operations:   mockOperationService,
-        Transactions: mockTransactionService,
-    }
-
-    handler := NewOpsByAccountHandler(lh)
-    handler(recorder, request)
-
-    resp := recorder.Result()
-    assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
-
-    raw, err := ioutil.ReadAll(resp.Body)
-    assert.NoError(t, err)
-
-    var problem problem.P
-    err = json.Unmarshal(raw, &problem)
-    assert.NoError(t, err)
-    assert.Equal(t, "Bad Request", problem.Title)
-    assert.Equal(t, "bad_request", problem.Type)
-    assert.Equal(t, "account_id", problem.Extras["invalid_field"])
-    assert.Equal(t, "The request you sent was invalid in some way.", problem.Detail)
-    assert.Equal(t, "unable to find account_id in url path", problem.Extras["reason"])
-}
-
-func TestOpsByAccountServerError(t *testing.T) {
-    setupTest()
-    recorder := httptest.NewRecorder()
-    pathParams := make(map[string]string)
-    pathParams["account_id"] = "G1234"
-    request := buildHttpRequest(
-        t,
-        map[string]string{},
-        pathParams,
-    )
-
-    mockOperationService := &services.MockOperationService{}
-    mockTransactionService := &services.MockTransactionService{}
-    mockOperationService.On("GetOperationsByAccount", mock.Anything, mock.Anything, mock.Anything, "G1234").Return([]common.Operation{}, errors.New("not good"))
-
-    lh := services.LightHorizon{
-        Operations:   mockOperationService,
-        Transactions: mockTransactionService,
-    }
-
-    handler := NewOpsByAccountHandler(lh)
-    handler(recorder, request)
-
-    resp := recorder.Result()
-    assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
-
-    raw, err := ioutil.ReadAll(resp.Body)
-    assert.NoError(t, err)
-
-    var problem problem.P
-    err = json.Unmarshal(raw, &problem)
-    assert.NoError(t, err)
-    assert.Equal(t, "Internal Server Error", problem.Title)
-    assert.Equal(t, "server_error", problem.Type)
-}
-
-func buildHttpRequest(
-    t *testing.T,
-    queryParams map[string]string,
-    routeParams map[string]string,
-) *http.Request {
-    request, err := http.NewRequest("GET", "/", nil)
-    require.NoError(t, err)
-
-    query := url.Values{}
-    for key, value := range queryParams {
-        query.Set(key, value)
-    }
-    request.URL.RawQuery = query.Encode()
-
-    chiRouteContext := chi.NewRouteContext()
-    for key, value := range routeParams {
-        chiRouteContext.URLParams.Add(key, value)
-    }
-    ctx := context.WithValue(context.Background(), chi.RouteCtxKey, chiRouteContext)
-    return request.WithContext(ctx)
-}
diff --git a/exp/lighthorizon/actions/apidocs.go b/exp/lighthorizon/actions/apidocs.go
deleted file mode 100644
index 713c4054fa..0000000000
--- a/exp/lighthorizon/actions/apidocs.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package actions
-
-import (
-    supportProblem "github.com/stellar/go/support/render/problem"
-    "net/http"
-)
-
-func ApiDocs() func(http.ResponseWriter, *http.Request) {
-    return func(w http.ResponseWriter, r *http.Request) {
-        r.URL.Scheme = "http"
-        r.URL.Host = "localhost:8080"
-
-        if r.Method != "GET" {
-            sendErrorResponse(r.Context(), w, supportProblem.BadRequest)
-            return
-        }
-
-        p, err := staticFiles.ReadFile("static/api_docs.yml")
-        if err != nil {
-            w.WriteHeader(http.StatusNotFound)
-            return
-        }
-
-        w.Header().Set("Content-Type", "application/openapi+yaml")
-        w.Write(p)
-    }
-}
diff --git a/exp/lighthorizon/actions/main.go b/exp/lighthorizon/actions/main.go
deleted file mode 100644
index 01769682b5..0000000000
--- a/exp/lighthorizon/actions/main.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package actions
-
-import (
-    "context"
-    "embed"
-    "encoding/json"
-    "net/http"
-    "net/url"
-    "strconv"
-
-    "github.com/go-chi/chi"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/client_golang/prometheus/promauto"
-    "github.com/stellar/go/support/log"
-    "github.com/stellar/go/support/render/hal"
-    supportProblem "github.com/stellar/go/support/render/problem"
-)
-
-var (
-    //go:embed static
-    staticFiles embed.FS
-    //lint:ignore U1000 temporary
-    requestCount = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "horizon_lite_request_count",
-        Help: "How many requests have occurred?",
-    })
-    //lint:ignore U1000 temporary
-    requestTime = promauto.NewHistogram(prometheus.HistogramOpts{
-        Name: "horizon_lite_request_duration",
-        Help: "How long do requests take?",
-        Buckets: append(
-            prometheus.LinearBuckets(0, 50, 20),
-            prometheus.LinearBuckets(1000, 1000, 8)...,
-        ),
-    })
-)
-
-type order string
-
-const (
-    orderAsc  order = "asc"
-    orderDesc order = "desc"
-)
-
-type pagination struct {
-    Limit  uint64
-    Cursor int64
-    Order  order
-}
-
-func sendPageResponse(ctx context.Context, w http.ResponseWriter, page hal.Page) {
-    w.Header().Set("Content-Type", "application/hal+json; charset=utf-8")
-    encoder := json.NewEncoder(w)
-    encoder.SetIndent("", "  ")
-    err := encoder.Encode(page)
-    if err != nil {
-        log.Error(err)
-        sendErrorResponse(ctx, w, supportProblem.ServerError)
-    }
-}
-
-func sendErrorResponse(ctx context.Context, w http.ResponseWriter, problem supportProblem.P) {
-    supportProblem.Render(ctx, w, problem)
-}
-
-func requestUnaryParam(r *http.Request, paramName string) (string, error) {
-    query, err := url.ParseQuery(r.URL.RawQuery)
-    if err != nil {
-        return "", err
-    }
-    return query.Get(paramName), nil
-}
-
-func paging(r *http.Request) (pagination, error) {
-    paginate := pagination{
-        Order: orderAsc,
-    }
-
-    if cursorRequested, err := requestUnaryParam(r, "cursor"); err != nil {
-        return pagination{}, err
-    } else if cursorRequested != "" {
-        paginate.Cursor, err = strconv.ParseInt(cursorRequested, 10, 64)
-        if err != nil {
-            return pagination{}, err
-        }
-    }
-
-    if limitRequested, err := requestUnaryParam(r, "limit"); err != nil {
-        return pagination{}, err
-    } else if limitRequested != "" {
-        paginate.Limit, err = strconv.ParseUint(limitRequested, 10, 64)
-        if err != nil {
-            return pagination{}, err
-        }
-    }
-
-    if orderRequested, err := requestUnaryParam(r, "order"); err != nil {
-        return pagination{}, err
-    } else if orderRequested != "" && orderRequested == string(orderDesc) {
-        paginate.Order = orderDesc
-    }
-
-    return paginate, nil
-}
-
-func getURLParam(r *http.Request, key string) (string, bool) {
-    rctx := chi.RouteContext(r.Context())
-
-    if rctx == nil {
-        return "", false
-    }
-
-    if len(rctx.URLParams.Keys) != len(rctx.URLParams.Values) {
-        return "", false
-    }
-
-    for k := len(rctx.URLParams.Keys) - 1; k >= 0; k-- {
-        if rctx.URLParams.Keys[k] == key {
-            return rctx.URLParams.Values[k], true
-        }
-    }
-
-    return "", false
-}
diff --git a/exp/lighthorizon/actions/problem.go b/exp/lighthorizon/actions/problem.go
deleted file mode 100644
index cd82cfb1e8..0000000000
--- a/exp/lighthorizon/actions/problem.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package actions
-
-import (
-    "net/http"
-
-    "github.com/stellar/go/support/render/problem"
-)
-
-// Well-known and reused problems below:
-// inspired by similar default established in horizon - services/horizon/internal/render/problem/problem.go
-var (
-
-    // ClientDisconnected, represented by a non-standard HTTP status code of 499, which was introduced by
-    // nginix.org(https://www.nginx.com/resources/wiki/extending/api/http/) as a way to capture this state. Use it as a shortcut
-    // in your actions.
-    ClientDisconnected = problem.P{
-        Type:   "client_disconnected",
-        Title:  "Client Disconnected",
-        Status: 499,
-        Detail: "The client has closed the connection.",
-    }
-
-    // ServiceUnavailable is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    ServiceUnavailable = problem.P{
-        Type:   "service_unavailable",
-        Title:  "Service Unavailable",
-        Status: http.StatusServiceUnavailable,
-        Detail: "The request cannot be serviced at this time.",
-    }
-
-    // RateLimitExceeded is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    RateLimitExceeded = problem.P{
-        Type:   "rate_limit_exceeded",
-        Title:  "Rate Limit Exceeded",
-        Status: 429,
-        Detail: "The rate limit for the requesting IP address is over its alloted " +
-            "limit.  The allowed limit and requests left per time period are " +
-            "communicated to clients via the http response headers 'X-RateLimit-*' " +
-            "headers.",
-    }
-
-    // NotImplemented is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    NotImplemented = problem.P{
-        Type:   "not_implemented",
-        Title:  "Resource Not Yet Implemented",
-        Status: http.StatusNotFound,
-        Detail: "While the requested URL is expected to eventually point to a " +
-            "valid resource, the work to implement the resource has not yet " +
-            "been completed.",
-    }
-
-    // NotAcceptable is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    NotAcceptable = problem.P{
-        Type: "not_acceptable",
-        Title: "An acceptable response content-type could not be provided for " +
-            "this request",
-        Status: http.StatusNotAcceptable,
-    }
-
-    // ServerOverCapacity is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    ServerOverCapacity = problem.P{
-        Type:   "server_over_capacity",
-        Title:  "Server Over Capacity",
-        Status: http.StatusServiceUnavailable,
-        Detail: "This horizon server is currently overloaded.  Please wait for " +
-            "several minutes before trying your request again.",
-    }
-
-    // Timeout is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    Timeout = problem.P{
-        Type:   "timeout",
-        Title:  "Timeout",
-        Status: http.StatusGatewayTimeout,
-        Detail: "Your request timed out before completing.  Please try your " +
-            "request again. If you are submitting a transaction make sure you are " +
-            "sending exactly the same transaction (with the same sequence number).",
-    }
-
-    // UnsupportedMediaType is a well-known problem type. Use it as a shortcut
-    // in your actions.
-    UnsupportedMediaType = problem.P{
-        Type:   "unsupported_media_type",
-        Title:  "Unsupported Media Type",
-        Status: http.StatusUnsupportedMediaType,
-        Detail: "The request has an unsupported content type. Presently, the " +
-            "only supported content type is application/x-www-form-urlencoded.",
-    }
-)
diff --git a/exp/lighthorizon/actions/root.go b/exp/lighthorizon/actions/root.go
deleted file mode 100644
index 3dfa4341a0..0000000000
--- a/exp/lighthorizon/actions/root.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package actions
-
-import (
-    "encoding/json"
-    "net/http"
-
-    "github.com/stellar/go/support/log"
-    supportProblem "github.com/stellar/go/support/render/problem"
-)
-
-type RootResponse struct {
-    Version      string `json:"version"`
-    LedgerSource string `json:"ledger_source"`
-    IndexSource  string `json:"index_source"`
-    LatestLedger uint32 `json:"latest_indexed_ledger"`
-}
-
-func Root(config RootResponse) func(http.ResponseWriter, *http.Request) {
-    return func(w http.ResponseWriter, r *http.Request) {
-        w.Header().Set("Content-Type", "application/hal+json; charset=utf-8")
-        encoder := json.NewEncoder(w)
-        encoder.SetIndent("", "  ")
-        err := encoder.Encode(config)
-        if err != nil {
-            log.Error(err)
-            sendErrorResponse(r.Context(), w, supportProblem.ServerError)
-        }
-    }
-}
diff --git a/exp/lighthorizon/actions/static/api_docs.yml b/exp/lighthorizon/actions/static/api_docs.yml
deleted file mode 100644
index 281cf2b605..0000000000
--- a/exp/lighthorizon/actions/static/api_docs.yml
+++ /dev/null
@@ -1,228 +0,0 @@
-openapi: 3.1.0
-info:
-  title: Horizon Lite API
-  version: 0.0.1
-  description: |-
-    The Horizon Lite API is a published web service on port 8080. It's considered
-    extremely experimental and only provides a minimal subset of endpoints.
-servers:
-  - url: http://localhost:8080/
-paths:
-  /accounts/{account_id}/operations:
-    get:
-      operationId: GetOperationsByAccountId
-      parameters:
-        - $ref: '#/components/parameters/CursorParam'
-        - $ref: '#/components/parameters/LimitParam'
-        - $ref: '#/components/parameters/AccountIDParam'
-      responses:
-        '200':
-          description: OK
-          headers: {}
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/CollectionModel_Operation'
-              example:
-                _links:
-                  self:
-                    href: http://localhost:8080/accounts/GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ/operations?cursor=6606617478959105&limit=1&order=asc
-                  next:
-                    href: http://localhost:8080/accounts/GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ/operations?cursor=6606621773926401&limit=1&order=asc
-                  prev:
-                    href: http://localhost:8080/accounts/GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ/operations?cursor=6606621773926401&limit=1&order=desc
-                _embedded:
-                  records:
-                    - _links:
-                        self:
-                          href: http://localhost:8080/operations/6606621773926401
-                      id: '6606621773926401'
-                      paging_token: '6606621773926401'
-                      transaction_successful: true
-                      source_account: GBGTCH47BOEEKLPHHMR2GOK6KQFGL3O7Q53FIZTJ7S7YEDWYJ5IUDJDJ
-                      type: manage_sell_offer
-                      type_i: 3
-                      created_at: '2022-06-17T23:29:42Z'
-                      transaction_hash: 544469b76cd90978345a4734a0ce69a9d0ddb4a6595a7afc503225a77826722a
-                      amount: '0.0000000'
-                      price: '0.0000001'
-                      price_r:
-                        n: 1
-                        d: 10000000
-                      buying_asset_type: credit_alphanum4
-                      buying_asset_code: USDV
-                      buying_asset_issuer: GAXXMQMTDUQ4YEPXJMKFBGN3GETPJNEXEUHFCQJKGJDVI3XQCNBU3OZI
-                      selling_asset_type: credit_alphanum4
-                      selling_asset_code: EURV
-                      selling_asset_issuer: GAXXMQMTDUQ4YEPXJMKFBGN3GETPJNEXEUHFCQJKGJDVI3XQCNBU3OZI
-                      offer_id: '425531'
-      summary: Get Operations by Account ID and Paged list
-      description: Get Operations by Account ID and Paged list
-      tags: []
-  /accounts/{account_id}/transactions:
-    get:
-      operationId: GetTransactionsByAccountId
-      parameters:
-        - $ref: '#/components/parameters/CursorParam'
-        - $ref: '#/components/parameters/LimitParam'
-        - $ref: '#/components/parameters/AccountIDParam'
-      responses:
-        '200':
-          description: OK
-          headers: {}
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/CollectionModel_Tx'
-              example:
-                _links:
-                  self:
-                    href: http://localhost:8080/accounts/GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ/transactions?cursor=&limit=0&order=
-                  next:
-                    href: http://localhost:8080/accounts/GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ/transactions?cursor=6606621773930497&limit=0&order=
-                  prev:
-                    href: http://localhost:8080/accounts/GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ/transactions?cursor=6606621773930497&limit=0&order=asc
-                _embedded:
-                  records:
-                    - memo: xdr.MemoText("psp:1405")
-                      _links:
-                        self:
-                          href: http://localhost:8080/transactions/5fef21d5ef75ecf18d65a160cfab17dca8dbf6dbc4e2fd66a510719ad8dddb09
-                      id: 5fef21d5ef75ecf18d65a160cfab17dca8dbf6dbc4e2fd66a510719ad8dddb09
-                      paging_token: '6606621773930497'
-                      successful: false
-                      hash: 5fef21d5ef75ecf18d65a160cfab17dca8dbf6dbc4e2fd66a510719ad8dddb09
-                      ledger: 1538224
-                      created_at: '2022-06-17T23:29:42Z'
-                      source_account: GCFJN22UG6IZHXKDVAJWAVEQ3NERGCRCURR2FHARNRBNLYFEQZGML4PW
-                      source_account_sequence: ''
-                      fee_account: ''
-                      fee_charged: '3000'
-                      max_fee: '0'
-                      operation_count: 1
-                      envelope_xdr: AAAAAgAAAACKlutUN5GT3UOoE2BUkNtJEwoipGOinBFsQtXgpIZMxQAAJxAAE05oAAHUKAAAAAEAAAAAAAAAAAAAAABirQ6AAAAAAQAAAAhwc3A6MTQwNQAAAAEAAAAAAAAAAQAAAADpPdN37FA9KVcJfmMBuD8pPcaT5jqlrMeYEOTP36Zo2AAAAAJBVE1ZUgAAAAAAAAAAAAAAZ8rWY3iaDnWNtfpvLpNaCEbKdDjrd2gQODOuKpmj1vMAAAAAGHAagAAAAAAAAAABpIZMxQAAAEDNJwYToiBR6bzElRL4ORJdXXZYO9cE3-ishQLC_fWGrPGhWrW7_UkPJWvxWdQDJBjVOHuA1Jjc94NSe91hSwEL
-                      result_xdr: AAAAAAAAC7j_____AAAAAQAAAAAAAAAB____-gAAAAA=
-                      result_meta_xdr: ''
-                      fee_meta_xdr: ''
-                      memo_type: MemoTypeMemoText
-                      signatures:
-                        - pIZMxQAAAEDNJwYToiBR6bzElRL4ORJdXXZYO9cE3-ishQLC_fWGrPGhWrW7_UkPJWvxWdQDJBjVOHuA1Jjc94NSe91hSwEL
-      summary: Get Transactions by Account ID and Paged list
-      description: Get Transactions by Account ID and Paged list
-      tags: []
-components:
-  parameters:
-    CursorParam:
-      name: cursor
-      in: query
-      required: false
-      schema:
-        type: integer
-        example: 6606617478959105
-      description: The packed order id consisting of Ledger Num, TX Order Num, Operation Order Num
-    LimitParam:
-      in: query
-      name: limit
-      required: false
-      schema:
-        type: integer
-        default: 10
-      description: The numbers of items to return
-    AccountIDParam:
-      name: account_id
-      in: path
-      required: true
-      description: The strkey encoded Account ID
-      schema:
-        type: string
-        example: GDMQQNJM4UL7QIA66P7R2PZHMQINWZBM77BEBMHLFXD5JEUAHGJ7R4JZ
-    TransactionIDParam:
-      name: tx_id
-      in: path
-      required: true
-      description: The Transaction hash, it's id.
-      schema:
-        type: string
-        example: a221f4743450736cba4a78940f22b01e1f64568eec8cb04c2ae37874d86cee3d
-  schemas:
-    CollectionModelItem:
-      type: object
-      properties:
-        _embedded:
-          type: object
-          properties:
-            records:
-              type: array
-              items:
-                "$ref": "#/components/schemas/Item"
-        _links:
-          "$ref": "#/components/schemas/Links"
-    Item:
-      type: object
-      properties:
-        id:
-          type: string
-        _links:
-          "$ref": "#/components/schemas/Links"
-    CollectionModel_Tx:
-      type: object
-      allOf:
-        - $ref: "#/components/schemas/CollectionModelItem"
-      properties:
-        _embedded:
-          type: object
-          properties:
-            records:
-              type: array
-              items:
-                $ref: "#/components/schemas/EntityModel_Tx"
-    EntityModel_Tx:
-      type: object
-      allOf:
-        - $ref: "#/components/schemas/Tx"
-        - $ref: "#/components/schemas/Links"
-    Tx:
-      type: object
-      properties:
-        id:
-          type: string
-        hash:
-          type: string
-        ledger:
-          type: integer
-    CollectionModel_Operation:
-      type: object
-      allOf:
-        - $ref: "#/components/schemas/CollectionModelItem"
-      properties:
-        _embedded:
-          type: object
-          properties:
-            records:
-              type: array
-              items:
-                $ref: "#/components/schemas/EntityModel_Operation"
-    EntityModel_Operation:
-      type: object
-      allOf:
-        - $ref: "#/components/schemas/Operation"
-        - $ref: "#/components/schemas/Links"
-    Operation:
-      type: object
-      properties:
-        id:
-          type: string
-        type:
-          type: string
-        source_account:
-          type: string
-    Links:
-      type: object
-      additionalProperties:
-        "$ref": "#/components/schemas/Link"
-    Link:
-      type: object
-      properties:
-        href:
-          type: string
-tags: []
diff --git a/exp/lighthorizon/adapters/account_merge.go b/exp/lighthorizon/adapters/account_merge.go
deleted file mode 100644
index 1fa6934638..0000000000
--- a/exp/lighthorizon/adapters/account_merge.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateAccountMergeOperation(op *common.Operation, baseOp operations.Base) (operations.AccountMerge, error) {
-    destination := op.Get().Body.MustDestination()
-
-    return operations.AccountMerge{
-        Base:    baseOp,
-        Account: op.SourceAccount().Address(),
-        Into:    destination.Address(),
-        // TODO:
-        AccountMuxed:   "",
-        AccountMuxedID: 0,
-        IntoMuxed:      "",
-        IntoMuxedID:    0,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/allow_trust.go b/exp/lighthorizon/adapters/allow_trust.go
deleted file mode 100644
index 2e3fea2188..0000000000
--- a/exp/lighthorizon/adapters/allow_trust.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-    "github.com/stellar/go/xdr"
-)
-
-func populateAllowTrustOperation(op *common.Operation, baseOp operations.Base) (operations.AllowTrust, error) {
-    allowTrust := op.Get().Body.MustAllowTrustOp()
-
-    var (
-        assetType string
-        code      string
-        issuer    string
-    )
-
-    err := allowTrust.Asset.ToAsset(op.SourceAccount()).Extract(&assetType, &code, &issuer)
-    if err != nil {
-        return operations.AllowTrust{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    flags := xdr.TrustLineFlags(allowTrust.Authorize)
-
-    return operations.AllowTrust{
-        Base: baseOp,
-        Asset: base.Asset{
-            Type:   assetType,
-            Code:   code,
-            Issuer: issuer,
-        },
-
-        Trustee:                        op.SourceAccount().Address(),
-        Trustor:                        allowTrust.Trustor.Address(),
-        Authorize:                      flags.IsAuthorized(),
-        AuthorizeToMaintainLiabilities: flags.IsAuthorizedToMaintainLiabilitiesFlag(),
-        // TODO:
-        TrusteeMuxed:   "",
-        TrusteeMuxedID: 0,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/begin_sponsoring_future_reserves.go b/exp/lighthorizon/adapters/begin_sponsoring_future_reserves.go
deleted file mode 100644
index a5fe86a3ce..0000000000
--- a/exp/lighthorizon/adapters/begin_sponsoring_future_reserves.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateBeginSponsoringFutureReservesOperation(op *common.Operation, baseOp operations.Base) (operations.BeginSponsoringFutureReserves, error) {
-    beginSponsoringFutureReserves := op.Get().Body.MustBeginSponsoringFutureReservesOp()
-
-    return operations.BeginSponsoringFutureReserves{
-        Base:        baseOp,
-        SponsoredID: beginSponsoringFutureReserves.SponsoredId.Address(),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/bump_sequence.go b/exp/lighthorizon/adapters/bump_sequence.go
deleted file mode 100644
index 53fe0125a2..0000000000
--- a/exp/lighthorizon/adapters/bump_sequence.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package adapters
-
-import (
-    "strconv"
-
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateBumpSequenceOperation(op *common.Operation, baseOp operations.Base) (operations.BumpSequence, error) {
-    bumpSequence := op.Get().Body.MustBumpSequenceOp()
-
-    return operations.BumpSequence{
-        Base:   baseOp,
-        BumpTo: strconv.FormatInt(int64(bumpSequence.BumpTo), 10),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/change_trust.go b/exp/lighthorizon/adapters/change_trust.go
deleted file mode 100644
index e06dbcfb39..0000000000
--- a/exp/lighthorizon/adapters/change_trust.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-    "github.com/stellar/go/xdr"
-)
-
-func populateChangeTrustOperation(op *common.Operation, baseOp operations.Base) (operations.ChangeTrust, error) {
-    changeTrust := op.Get().Body.MustChangeTrustOp()
-
-    var (
-        assetType string
-        code      string
-        issuer    string
-
-        liquidityPoolID string
-    )
-
-    switch changeTrust.Line.Type {
-    case xdr.AssetTypeAssetTypeCreditAlphanum4, xdr.AssetTypeAssetTypeCreditAlphanum12:
-        err := changeTrust.Line.ToAsset().Extract(&assetType, &code, &issuer)
-        if err != nil {
-            return operations.ChangeTrust{}, errors.Wrap(err, "xdr.Asset.Extract error")
-        }
-    case xdr.AssetTypeAssetTypePoolShare:
-        assetType = "liquidity_pool_shares"
-
-        if changeTrust.Line.LiquidityPool.Type != xdr.LiquidityPoolTypeLiquidityPoolConstantProduct {
-            return operations.ChangeTrust{}, errors.Errorf("unkown liquidity pool type %d", changeTrust.Line.LiquidityPool.Type)
-        }
-
-        cp := changeTrust.Line.LiquidityPool.ConstantProduct
-        poolID, err := xdr.NewPoolId(cp.AssetA, cp.AssetB, cp.Fee)
-        if err != nil {
-            return operations.ChangeTrust{}, errors.Wrap(err, "error generating pool id")
-        }
-        liquidityPoolID = xdr.Hash(poolID).HexString()
-    default:
-        return operations.ChangeTrust{}, errors.Errorf("unknown asset type %d", changeTrust.Line.Type)
-    }
-
-    return operations.ChangeTrust{
-        Base: baseOp,
-        LiquidityPoolOrAsset: base.LiquidityPoolOrAsset{
-            Asset: base.Asset{
-                Type:   assetType,
-                Code:   code,
-                Issuer: issuer,
-            },
-            LiquidityPoolID: liquidityPoolID,
-        },
-        Limit:   amount.String(changeTrust.Limit),
-        Trustee: issuer,
-        Trustor: op.SourceAccount().Address(),
-        // TODO:
-        TrustorMuxed:   "",
-        TrustorMuxedID: 0,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/claim_claimable_balance.go b/exp/lighthorizon/adapters/claim_claimable_balance.go
deleted file mode 100644
index 7dffe49d13..0000000000
--- a/exp/lighthorizon/adapters/claim_claimable_balance.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-    "github.com/stellar/go/xdr"
-)
-
-func populateClaimClaimableBalanceOperation(op *common.Operation, baseOp operations.Base) (operations.ClaimClaimableBalance, error) {
-    claimClaimableBalance := op.Get().Body.MustClaimClaimableBalanceOp()
-
-    balanceID, err := xdr.MarshalHex(claimClaimableBalance.BalanceId)
-    if err != nil {
-        return operations.ClaimClaimableBalance{}, errors.New("invalid balanceId")
-    }
-
-    return operations.ClaimClaimableBalance{
-        Base:      baseOp,
-        BalanceID: balanceID,
-        Claimant:  op.SourceAccount().Address(),
-        // TODO
-        ClaimantMuxed:   "",
-        ClaimantMuxedID: 0,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/clawback.go b/exp/lighthorizon/adapters/clawback.go
deleted file mode 100644
index 32f6ed7401..0000000000
--- a/exp/lighthorizon/adapters/clawback.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populateClawbackOperation(op *common.Operation, baseOp operations.Base) (operations.Clawback, error) {
-    clawback := op.Get().Body.MustClawbackOp()
-
-    var (
-        assetType string
-        code      string
-        issuer    string
-    )
-    err := clawback.Asset.Extract(&assetType, &code, &issuer)
-    if err != nil {
-        return operations.Clawback{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    return operations.Clawback{
-        Base: baseOp,
-        Asset: base.Asset{
-            Type:   assetType,
-            Code:   code,
-            Issuer: issuer,
-        },
-        Amount: amount.String(clawback.Amount),
-        From:   clawback.From.Address(),
-        // TODO:
-        FromMuxed:   "",
-        FromMuxedID: 0,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/clawback_claimable_balance.go b/exp/lighthorizon/adapters/clawback_claimable_balance.go
deleted file mode 100644
index a24d4828b0..0000000000
--- a/exp/lighthorizon/adapters/clawback_claimable_balance.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-    "github.com/stellar/go/xdr"
-)
-
-func populateClawbackClaimableBalanceOperation(op *common.Operation, baseOp operations.Base) (operations.ClawbackClaimableBalance, error) {
-    clawbackClaimableBalance := op.Get().Body.MustClawbackClaimableBalanceOp()
-
-    balanceID, err := xdr.MarshalHex(clawbackClaimableBalance.BalanceId)
-    if err != nil {
-        return operations.ClawbackClaimableBalance{}, errors.Wrap(err, "invalid balanceId")
-    }
-
-    return operations.ClawbackClaimableBalance{
-        Base:      baseOp,
-        BalanceID: balanceID,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/create_account.go b/exp/lighthorizon/adapters/create_account.go
deleted file mode 100644
index d9a7c678a1..0000000000
--- a/exp/lighthorizon/adapters/create_account.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateCreateAccountOperation(op *common.Operation, baseOp operations.Base) (operations.CreateAccount, error) {
-    createAccount := op.Get().Body.MustCreateAccountOp()
-
-    return operations.CreateAccount{
-        Base:            baseOp,
-        StartingBalance: amount.String(createAccount.StartingBalance),
-        Funder:          op.SourceAccount().Address(),
-        Account:         createAccount.Destination.Address(),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/create_claimable_balance.go b/exp/lighthorizon/adapters/create_claimable_balance.go
deleted file mode 100644
index 472e43b30c..0000000000
--- a/exp/lighthorizon/adapters/create_claimable_balance.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateCreateClaimableBalanceOperation(op *common.Operation, baseOp operations.Base) (operations.CreateClaimableBalance, error) {
-    createClaimableBalance := op.Get().Body.MustCreateClaimableBalanceOp()
-
-    claimants := make([]horizon.Claimant, len(createClaimableBalance.Claimants))
-    for i, claimant := range createClaimableBalance.Claimants {
-        claimants[i] = horizon.Claimant{
-            Destination: claimant.MustV0().Destination.Address(),
-            Predicate:   claimant.MustV0().Predicate,
-        }
-    }
-
-    return operations.CreateClaimableBalance{
-        Base:      baseOp,
-        Asset:     createClaimableBalance.Asset.StringCanonical(),
-        Amount:    amount.String(createClaimableBalance.Amount),
-        Claimants: claimants,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/create_passive_sell_offer.go b/exp/lighthorizon/adapters/create_passive_sell_offer.go
deleted file mode 100644
index 89b2b29e97..0000000000
--- a/exp/lighthorizon/adapters/create_passive_sell_offer.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populateCreatePassiveSellOfferOperation(op *common.Operation, baseOp operations.Base) (operations.CreatePassiveSellOffer, error) {
-    createPassiveSellOffer := op.Get().Body.MustCreatePassiveSellOfferOp()
-
-    var (
-        buyingAssetType string
-        buyingCode      string
-        buyingIssuer    string
-    )
-    err := createPassiveSellOffer.Buying.Extract(&buyingAssetType, &buyingCode, &buyingIssuer)
-    if err != nil {
-        return operations.CreatePassiveSellOffer{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    var (
-        sellingAssetType string
-        sellingCode      string
-        sellingIssuer    string
-    )
-    err = createPassiveSellOffer.Selling.Extract(&sellingAssetType, &sellingCode, &sellingIssuer)
-    if err != nil {
-        return operations.CreatePassiveSellOffer{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    return operations.CreatePassiveSellOffer{
-        Offer: operations.Offer{
-            Base:   baseOp,
-            Amount: amount.String(createPassiveSellOffer.Amount),
-            Price:  createPassiveSellOffer.Price.String(),
-            PriceR: base.Price{
-                N: int32(createPassiveSellOffer.Price.N),
-                D: int32(createPassiveSellOffer.Price.D),
-            },
-            BuyingAssetType:    buyingAssetType,
-            BuyingAssetCode:    buyingCode,
-            BuyingAssetIssuer:  buyingIssuer,
-            SellingAssetType:   sellingAssetType,
-            SellingAssetCode:   sellingCode,
-            SellingAssetIssuer: sellingIssuer,
-        },
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/end_sponsoring_future_reserves.go b/exp/lighthorizon/adapters/end_sponsoring_future_reserves.go
deleted file mode 100644
index b6ca7a1742..0000000000
--- a/exp/lighthorizon/adapters/end_sponsoring_future_reserves.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateEndSponsoringFutureReservesOperation(op *common.Operation, baseOp operations.Base) (operations.EndSponsoringFutureReserves, error) {
-    return operations.EndSponsoringFutureReserves{
-        Base:         baseOp,
-        BeginSponsor: findInitatingSandwichSponsor(op),
-        // TODO
-        BeginSponsorMuxed:   "",
-        BeginSponsorMuxedID: 0,
-    }, nil
-}
-
-func findInitatingSandwichSponsor(op *common.Operation) string {
-    if !op.TransactionResult.Successful() {
-        // Failed transactions may not have a compliant sandwich structure
-        // we can rely on (e.g. invalid nesting or a being operation with the wrong sponsoree ID)
-        // and thus we bail out since we could return incorrect information.
-        return ""
-    }
-    sponsoree := op.SourceAccount()
-    operations := op.TransactionEnvelope.Operations()
-    for i := int(op.OpIndex) - 1; i >= 0; i-- {
-        if beginOp, ok := operations[i].Body.GetBeginSponsoringFutureReservesOp(); ok &&
-            beginOp.SponsoredId.Address() == sponsoree.Address() {
-            if operations[i].SourceAccount != nil {
-                return operations[i].SourceAccount.Address()
-            } else {
-                return op.TransactionEnvelope.SourceAccount().ToAccountId().Address()
-            }
-        }
-    }
-    return ""
-}
diff --git a/exp/lighthorizon/adapters/inflation.go b/exp/lighthorizon/adapters/inflation.go
deleted file mode 100644
index 57c927263d..0000000000
--- a/exp/lighthorizon/adapters/inflation.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateInflationOperation(op *common.Operation, baseOp operations.Base) (operations.Inflation, error) {
-    return operations.Inflation{
-        Base: baseOp,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/liquidity_pool_deposit.go b/exp/lighthorizon/adapters/liquidity_pool_deposit.go
deleted file mode 100644
index f0b4384009..0000000000
--- a/exp/lighthorizon/adapters/liquidity_pool_deposit.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/xdr"
-)
-
-func populateLiquidityPoolDepositOperation(op *common.Operation, baseOp operations.Base) (operations.LiquidityPoolDeposit, error) {
-    liquidityPoolDeposit := op.Get().Body.MustLiquidityPoolDepositOp()
-
-    return operations.LiquidityPoolDeposit{
-        Base: baseOp,
-        // TODO: some fields missing because derived from meta
-        LiquidityPoolID: xdr.Hash(liquidityPoolDeposit.LiquidityPoolId).HexString(),
-        ReservesMax: []base.AssetAmount{
-            {Amount: amount.String(liquidityPoolDeposit.MaxAmountA)},
-            {Amount: amount.String(liquidityPoolDeposit.MaxAmountB)},
-        },
-        MinPrice: liquidityPoolDeposit.MinPrice.String(),
-        MinPriceR: base.Price{
-            N: int32(liquidityPoolDeposit.MinPrice.N),
-            D: int32(liquidityPoolDeposit.MinPrice.D),
-        },
-        MaxPrice: liquidityPoolDeposit.MaxPrice.String(),
-        MaxPriceR: base.Price{
-            N: int32(liquidityPoolDeposit.MaxPrice.N),
-            D: int32(liquidityPoolDeposit.MaxPrice.D),
-        },
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/liquidity_pool_withdraw.go b/exp/lighthorizon/adapters/liquidity_pool_withdraw.go
deleted file mode 100644
index c618baf2de..0000000000
--- a/exp/lighthorizon/adapters/liquidity_pool_withdraw.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/xdr"
-)
-
-func populateLiquidityPoolWithdrawOperation(op *common.Operation, baseOp operations.Base) (operations.LiquidityPoolWithdraw, error) {
-    liquidityPoolWithdraw := op.Get().Body.MustLiquidityPoolWithdrawOp()
-
-    return operations.LiquidityPoolWithdraw{
-        Base: baseOp,
-        // TODO: some fields missing because derived from meta
-        LiquidityPoolID: xdr.Hash(liquidityPoolWithdraw.LiquidityPoolId).HexString(),
-        ReservesMin: []base.AssetAmount{
-            {Amount: amount.String(liquidityPoolWithdraw.MinAmountA)},
-            {Amount: amount.String(liquidityPoolWithdraw.MinAmountB)},
-        },
-        Shares: amount.String(liquidityPoolWithdraw.Amount),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/manage_buy_offer.go b/exp/lighthorizon/adapters/manage_buy_offer.go
deleted file mode 100644
index ccdd66bc69..0000000000
--- a/exp/lighthorizon/adapters/manage_buy_offer.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populateManageBuyOfferOperation(op *common.Operation, baseOp operations.Base) (operations.ManageBuyOffer, error) {
-    manageBuyOffer := op.Get().Body.MustManageBuyOfferOp()
-
-    var (
-        buyingAssetType string
-        buyingCode      string
-        buyingIssuer    string
-    )
-    err := manageBuyOffer.Buying.Extract(&buyingAssetType, &buyingCode, &buyingIssuer)
-    if err != nil {
-        return operations.ManageBuyOffer{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    var (
-        sellingAssetType string
-        sellingCode      string
-        sellingIssuer    string
-    )
-    err = manageBuyOffer.Selling.Extract(&sellingAssetType, &sellingCode, &sellingIssuer)
-    if err != nil {
-        return operations.ManageBuyOffer{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    return operations.ManageBuyOffer{
-        Offer: operations.Offer{
-            Base:   baseOp,
-            Amount: amount.String(manageBuyOffer.BuyAmount),
-            Price:  manageBuyOffer.Price.String(),
-            PriceR: base.Price{
-                N: int32(manageBuyOffer.Price.N),
-                D: int32(manageBuyOffer.Price.D),
-            },
-            BuyingAssetType:    buyingAssetType,
-            BuyingAssetCode:    buyingCode,
-            BuyingAssetIssuer:  buyingIssuer,
-            SellingAssetType:   sellingAssetType,
-            SellingAssetCode:   sellingCode,
-            SellingAssetIssuer: sellingIssuer,
-        },
-        OfferID: int64(manageBuyOffer.OfferId),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/manage_data.go b/exp/lighthorizon/adapters/manage_data.go
deleted file mode 100644
index dd66ed2ae4..0000000000
--- a/exp/lighthorizon/adapters/manage_data.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package adapters
-
-import (
-    "encoding/base64"
-
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-)
-
-func populateManageDataOperation(op *common.Operation, baseOp operations.Base) (operations.ManageData, error) {
-    manageData := op.Get().Body.MustManageDataOp()
-
-    dataValue := ""
-    if manageData.DataValue != nil {
-        dataValue = base64.StdEncoding.EncodeToString(*manageData.DataValue)
-    }
-
-    return operations.ManageData{
-        Base:  baseOp,
-        Name:  string(manageData.DataName),
-        Value: dataValue,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/manage_sell_offer.go b/exp/lighthorizon/adapters/manage_sell_offer.go
deleted file mode 100644
index 56893cc1ab..0000000000
--- a/exp/lighthorizon/adapters/manage_sell_offer.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populateManageSellOfferOperation(op *common.Operation, baseOp operations.Base) (operations.ManageSellOffer, error) {
-    manageSellOffer := op.Get().Body.MustManageSellOfferOp()
-
-    var (
-        buyingAssetType string
-        buyingCode      string
-        buyingIssuer    string
-    )
-    err := manageSellOffer.Buying.Extract(&buyingAssetType, &buyingCode, &buyingIssuer)
-    if err != nil {
-        return operations.ManageSellOffer{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    var (
-        sellingAssetType string
-        sellingCode      string
-        sellingIssuer    string
-    )
-    err = manageSellOffer.Selling.Extract(&sellingAssetType, &sellingCode, &sellingIssuer)
-    if err != nil {
-        return operations.ManageSellOffer{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    return operations.ManageSellOffer{
-        Offer: operations.Offer{
-            Base:   baseOp,
-            Amount: amount.String(manageSellOffer.Amount),
-            Price:  manageSellOffer.Price.String(),
-            PriceR: base.Price{
-                N: int32(manageSellOffer.Price.N),
-                D: int32(manageSellOffer.Price.D),
-            },
-            BuyingAssetType:    buyingAssetType,
-            BuyingAssetCode:    buyingCode,
-            BuyingAssetIssuer:  buyingIssuer,
-            SellingAssetType:   sellingAssetType,
-            SellingAssetCode:   sellingCode,
-            SellingAssetIssuer: sellingIssuer,
-        },
-        OfferID: int64(manageSellOffer.OfferId),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/operation.go b/exp/lighthorizon/adapters/operation.go
deleted file mode 100644
index a2448c8c58..0000000000
--- a/exp/lighthorizon/adapters/operation.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package adapters
-
-import (
-    "fmt"
-    "net/http"
-    "strconv"
-    "time"
-
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/render/hal"
-    "github.com/stellar/go/xdr"
-)
-
-func PopulateOperation(r *http.Request, op *common.Operation) (operations.Operation, error) {
-    hash, err := op.TransactionHash()
-    if err != nil {
-        return nil, err
-    }
-
-    toid := strconv.FormatInt(op.TOID(), 10)
-    baseOp := operations.Base{
-        ID:                    toid,
-        PT:                    toid,
-        TransactionSuccessful: op.TransactionResult.Successful(),
-        SourceAccount:         op.SourceAccount().Address(),
-        LedgerCloseTime:       time.Unix(int64(op.LedgerHeader.ScpValue.CloseTime), 0).UTC(),
-        TransactionHash:       hash,
-        Type:                  operations.TypeNames[op.Get().Body.Type],
-        TypeI:                 int32(op.Get().Body.Type),
-    }
-
-    lb := hal.LinkBuilder{Base: r.URL}
-    self := fmt.Sprintf("/operations/%s", toid)
-    baseOp.Links.Self = lb.Link(self)
-    baseOp.Links.Succeeds = lb.Linkf("/effects?order=desc&cursor=%s", baseOp.PT)
-    baseOp.Links.Precedes = lb.Linkf("/effects?order=asc&cursor=%s", baseOp.PT)
-    baseOp.Links.Transaction = lb.Linkf("/transactions/%s", hash)
-    baseOp.Links.Effects = lb.Link(self, "effects")
-
-    switch op.Get().Body.Type {
-    case xdr.OperationTypeCreateAccount:
-        return populateCreateAccountOperation(op, baseOp)
-    case xdr.OperationTypePayment:
-        return populatePaymentOperation(op, baseOp)
-    case xdr.OperationTypePathPaymentStrictReceive:
-        return populatePathPaymentStrictReceiveOperation(op, baseOp)
-    case xdr.OperationTypePathPaymentStrictSend:
-        return populatePathPaymentStrictSendOperation(op, baseOp)
-    case xdr.OperationTypeManageBuyOffer:
-        return populateManageBuyOfferOperation(op, baseOp)
-    case xdr.OperationTypeManageSellOffer:
-        return populateManageSellOfferOperation(op, baseOp)
-    case xdr.OperationTypeCreatePassiveSellOffer:
-        return populateCreatePassiveSellOfferOperation(op, baseOp)
-    case xdr.OperationTypeSetOptions:
-        return populateSetOptionsOperation(op, baseOp)
-    case xdr.OperationTypeChangeTrust:
-        return populateChangeTrustOperation(op, baseOp)
-    case xdr.OperationTypeAllowTrust:
-        return populateAllowTrustOperation(op, baseOp)
-    case xdr.OperationTypeAccountMerge:
-        return populateAccountMergeOperation(op, baseOp)
-    case xdr.OperationTypeInflation:
-        return populateInflationOperation(op, baseOp)
-    case xdr.OperationTypeManageData:
-        return populateManageDataOperation(op, baseOp)
-    case xdr.OperationTypeBumpSequence:
-        return populateBumpSequenceOperation(op, baseOp)
-    case xdr.OperationTypeCreateClaimableBalance:
-        return populateCreateClaimableBalanceOperation(op, baseOp)
-    case xdr.OperationTypeClaimClaimableBalance:
-        return populateClaimClaimableBalanceOperation(op, baseOp)
-    case xdr.OperationTypeBeginSponsoringFutureReserves:
-        return populateBeginSponsoringFutureReservesOperation(op, baseOp)
-    case xdr.OperationTypeEndSponsoringFutureReserves:
-        return populateEndSponsoringFutureReservesOperation(op, baseOp)
-    case xdr.OperationTypeRevokeSponsorship:
-        return populateRevokeSponsorshipOperation(op, baseOp)
-    case xdr.OperationTypeClawback:
-        return populateClawbackOperation(op, baseOp)
-    case xdr.OperationTypeClawbackClaimableBalance:
-        return populateClawbackClaimableBalanceOperation(op, baseOp)
-    case xdr.OperationTypeSetTrustLineFlags:
-        return populateSetTrustLineFlagsOperation(op, baseOp)
-    case xdr.OperationTypeLiquidityPoolDeposit:
-        return populateLiquidityPoolDepositOperation(op, baseOp)
-    case xdr.OperationTypeLiquidityPoolWithdraw:
-        return populateLiquidityPoolWithdrawOperation(op, baseOp)
-    default:
-        return nil, fmt.Errorf("unknown operation type: %s", op.Get().Body.Type)
-    }
-}
diff --git a/exp/lighthorizon/adapters/path_payment_strict_receive.go b/exp/lighthorizon/adapters/path_payment_strict_receive.go
deleted file mode 100644
index eeaabad969..0000000000
--- a/exp/lighthorizon/adapters/path_payment_strict_receive.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populatePathPaymentStrictReceiveOperation(op *common.Operation, baseOp operations.Base) (operations.PathPayment, error) {
-    payment := op.Get().Body.MustPathPaymentStrictReceiveOp()
-
-    var (
-        sendAssetType string
-        sendCode      string
-        sendIssuer    string
-    )
-    err := payment.SendAsset.Extract(&sendAssetType, &sendCode, &sendIssuer)
-    if err != nil {
-        return operations.PathPayment{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    var (
-        destAssetType string
-        destCode      string
-        destIssuer    string
-    )
-    err = payment.DestAsset.Extract(&destAssetType, &destCode, &destIssuer)
-    if err != nil {
-        return operations.PathPayment{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    sourceAmount := amount.String(0)
-    if op.TransactionResult.Successful() {
-        result := op.OperationResult().MustPathPaymentStrictReceiveResult()
-        sourceAmount = amount.String(result.SendAmount())
-    }
-
-    var path = make([]base.Asset, len(payment.Path))
-    for i := range payment.Path {
-        var (
-            assetType string
-            code      string
-            issuer    string
-        )
-        err = payment.Path[i].Extract(&assetType, &code, &issuer)
-        if err != nil {
-            return operations.PathPayment{}, errors.Wrap(err, "xdr.Asset.Extract error")
-        }
-
-        path[i] = base.Asset{
-            Type:   assetType,
-            Code:   code,
-            Issuer: issuer,
-        }
-    }
-
-    return operations.PathPayment{
-        Payment: operations.Payment{
-            Base: baseOp,
-            From: op.SourceAccount().Address(),
-            To:   payment.Destination.Address(),
-            Asset: base.Asset{
-                Type:   destAssetType,
-                Code:   destCode,
-                Issuer: destIssuer,
-            },
-            Amount: amount.String(payment.DestAmount),
-        },
-        Path:              path,
-        SourceAmount:      sourceAmount,
-        SourceMax:         amount.String(payment.SendMax),
-        SourceAssetType:   sendAssetType,
-        SourceAssetCode:   sendCode,
-        SourceAssetIssuer: sendIssuer,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/path_payment_strict_send.go b/exp/lighthorizon/adapters/path_payment_strict_send.go
deleted file mode 100644
index 0068db30b5..0000000000
--- a/exp/lighthorizon/adapters/path_payment_strict_send.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populatePathPaymentStrictSendOperation(op *common.Operation, baseOp operations.Base) (operations.PathPaymentStrictSend, error) {
-    payment := op.Get().Body.MustPathPaymentStrictSendOp()
-
-    var (
-        sendAssetType string
-        sendCode      string
-        sendIssuer    string
-    )
-    err := payment.SendAsset.Extract(&sendAssetType, &sendCode, &sendIssuer)
-    if err != nil {
-        return operations.PathPaymentStrictSend{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    var (
-        destAssetType string
-        destCode      string
-        destIssuer    string
-    )
-    err = payment.DestAsset.Extract(&destAssetType, &destCode, &destIssuer)
-    if err != nil {
-        return operations.PathPaymentStrictSend{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    destAmount := amount.String(0)
-    if op.TransactionResult.Successful() {
-        result := op.OperationResult().MustPathPaymentStrictSendResult()
-        destAmount = amount.String(result.DestAmount())
-    }
-
-    var path = make([]base.Asset, len(payment.Path))
-    for i := range payment.Path {
-        var (
-            assetType string
-            code      string
-            issuer    string
-        )
-        err = payment.Path[i].Extract(&assetType, &code, &issuer)
-        if err != nil {
-            return operations.PathPaymentStrictSend{}, errors.Wrap(err, "xdr.Asset.Extract error")
-        }
-
-        path[i] = base.Asset{
-            Type:   assetType,
-            Code:   code,
-            Issuer: issuer,
-        }
-    }
-
-    return operations.PathPaymentStrictSend{
-        Payment: operations.Payment{
-            Base: baseOp,
-            From: op.SourceAccount().Address(),
-            To:   payment.Destination.Address(),
-            Asset: base.Asset{
-                Type:   destAssetType,
-                Code:   destCode,
-                Issuer: destIssuer,
-            },
-            Amount: destAmount,
-        },
-        Path:              path,
-        SourceAmount:      amount.String(payment.SendAmount),
-        DestinationMin:    amount.String(payment.DestMin),
-        SourceAssetType:   sendAssetType,
-        SourceAssetCode:   sendCode,
-        SourceAssetIssuer: sendIssuer,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/payment.go b/exp/lighthorizon/adapters/payment.go
deleted file mode 100644
index 97af5f6120..0000000000
--- a/exp/lighthorizon/adapters/payment.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/amount"
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-)
-
-func populatePaymentOperation(op *common.Operation, baseOp operations.Base) (operations.Payment, error) {
-    payment := op.Get().Body.MustPaymentOp()
-
-    var (
-        assetType string
-        code      string
-        issuer    string
-    )
-    err := payment.Asset.Extract(&assetType, &code, &issuer)
-    if err != nil {
-        return operations.Payment{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    return operations.Payment{
-        Base: baseOp,
-        To:   payment.Destination.Address(),
-        From: op.SourceAccount().Address(),
-        Asset: base.Asset{
-            Type:   assetType,
-            Code:   code,
-            Issuer: issuer,
-        },
-        Amount: amount.StringFromInt64(int64(payment.Amount)),
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/revoke_sponsorship.go b/exp/lighthorizon/adapters/revoke_sponsorship.go
deleted file mode 100644
index cb19decc5c..0000000000
--- a/exp/lighthorizon/adapters/revoke_sponsorship.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-    "github.com/stellar/go/xdr"
-)
-
-func populateRevokeSponsorshipOperation(op *common.Operation, baseOp operations.Base) (operations.RevokeSponsorship, error) {
-    revokeSponsorship := op.Get().Body.MustRevokeSponsorshipOp()
-
-    switch revokeSponsorship.Type {
-    case xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry:
-        ret := operations.RevokeSponsorship{
-            Base: baseOp,
-        }
-
-        ledgerKey := revokeSponsorship.LedgerKey
-
-        switch ledgerKey.Type {
-        case xdr.LedgerEntryTypeAccount:
-            accountID := ledgerKey.Account.AccountId.Address()
-            ret.AccountID = &accountID
-        case xdr.LedgerEntryTypeClaimableBalance:
-            marshalHex, err := xdr.MarshalHex(ledgerKey.ClaimableBalance.BalanceId)
-            if err != nil {
-                return operations.RevokeSponsorship{}, err
-            }
-            ret.ClaimableBalanceID = &marshalHex
-        case xdr.LedgerEntryTypeData:
-            accountID := ledgerKey.Data.AccountId.Address()
-            dataName := string(ledgerKey.Data.DataName)
-            ret.DataAccountID = &accountID
-            ret.DataName = &dataName
-        case xdr.LedgerEntryTypeOffer:
-            offerID := int64(ledgerKey.Offer.OfferId)
-            ret.OfferID = &offerID
-        case xdr.LedgerEntryTypeTrustline:
-            trustlineAccountID := ledgerKey.TrustLine.AccountId.Address()
-            ret.TrustlineAccountID = &trustlineAccountID
-            if ledgerKey.TrustLine.Asset.Type == xdr.AssetTypeAssetTypePoolShare {
-                trustlineLiquidityPoolID := xdr.Hash(*ledgerKey.TrustLine.Asset.LiquidityPoolId).HexString()
-                ret.TrustlineLiquidityPoolID = &trustlineLiquidityPoolID
-            } else {
-                trustlineAsset := ledgerKey.TrustLine.Asset.ToAsset().StringCanonical()
-                ret.TrustlineAsset = &trustlineAsset
-            }
-        default:
-            return operations.RevokeSponsorship{}, errors.Errorf("invalid ledger key type: %d", ledgerKey.Type)
-        }
-
-        return ret, nil
-    case xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner:
-        signerAccountID := revokeSponsorship.Signer.AccountId.Address()
-        signerKey := revokeSponsorship.Signer.SignerKey.Address()
-
-        return operations.RevokeSponsorship{
-            Base:            baseOp,
-            SignerAccountID: &signerAccountID,
-            SignerKey:       &signerKey,
-        }, nil
-    }
-
-    return operations.RevokeSponsorship{}, errors.Errorf("invalid revoke type: %d", revokeSponsorship.Type)
-}
diff --git a/exp/lighthorizon/adapters/set_options.go b/exp/lighthorizon/adapters/set_options.go
deleted file mode 100644
index cf2cdeb20f..0000000000
--- a/exp/lighthorizon/adapters/set_options.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/xdr"
-)
-
-func populateSetOptionsOperation(op *common.Operation, baseOp operations.Base) (operations.SetOptions, error) {
-    setOptions := op.Get().Body.MustSetOptionsOp()
-
-    homeDomain := ""
-    if setOptions.HomeDomain != nil {
-        homeDomain = string(*setOptions.HomeDomain)
-    }
-
-    inflationDest := ""
-    if setOptions.InflationDest != nil {
-        inflationDest = setOptions.InflationDest.Address()
-    }
-
-    var signerKey string
-    var signerWeight *int
-    if setOptions.Signer != nil {
-        signerKey = setOptions.Signer.Key.Address()
-        signerWeightInt := int(setOptions.Signer.Weight)
-        signerWeight = &signerWeightInt
-    }
-
-    var masterKeyWeight, lowThreshold, medThreshold, highThreshold *int
-    if setOptions.MasterWeight != nil {
-        masterKeyWeightInt := int(*setOptions.MasterWeight)
-        masterKeyWeight = &masterKeyWeightInt
-    }
-    if setOptions.LowThreshold != nil {
-        lowThresholdInt := int(*setOptions.LowThreshold)
-        lowThreshold = &lowThresholdInt
-    }
-    if setOptions.MedThreshold != nil {
-        medThresholdInt := int(*setOptions.MedThreshold)
-        medThreshold = &medThresholdInt
-    }
-    if setOptions.HighThreshold != nil {
-        highThresholdInt := int(*setOptions.HighThreshold)
-        highThreshold = &highThresholdInt
-    }
-
-    var (
-        setFlags  []int
-        setFlagsS []string
-
-        clearFlags  []int
-        clearFlagsS []string
-    )
-
-    if setOptions.SetFlags != nil && *setOptions.SetFlags > 0 {
-        f := xdr.AccountFlags(*setOptions.SetFlags)
-
-        if f.IsAuthRequired() {
-            setFlags = append(setFlags, int(xdr.AccountFlagsAuthRequiredFlag))
-            setFlagsS = append(setFlagsS, "auth_required")
-        }
-
-        if f.IsAuthRevocable() {
-            setFlags = append(setFlags, int(xdr.AccountFlagsAuthRevocableFlag))
-            setFlagsS = append(setFlagsS, "auth_revocable")
-        }
-
-        if f.IsAuthImmutable() {
-            setFlags = append(setFlags, int(xdr.AccountFlagsAuthImmutableFlag))
-            setFlagsS = append(setFlagsS, "auth_immutable")
-        }
-
-        if f.IsAuthClawbackEnabled() {
-            setFlags = append(setFlags, int(xdr.AccountFlagsAuthClawbackEnabledFlag))
-            setFlagsS = append(setFlagsS, "auth_clawback_enabled")
-        }
-    }
-
-    if setOptions.ClearFlags != nil && *setOptions.ClearFlags > 0 {
-        f := xdr.AccountFlags(*setOptions.ClearFlags)
-
-        if f.IsAuthRequired() {
-            clearFlags = append(clearFlags, int(xdr.AccountFlagsAuthRequiredFlag))
-            clearFlagsS = append(clearFlagsS, "auth_required")
-        }
-
-        if f.IsAuthRevocable() {
-            clearFlags = append(clearFlags, int(xdr.AccountFlagsAuthRevocableFlag))
-            clearFlagsS = append(clearFlagsS, "auth_revocable")
-        }
-
-        if f.IsAuthImmutable() {
-            clearFlags = append(clearFlags, int(xdr.AccountFlagsAuthImmutableFlag))
-            clearFlagsS = append(clearFlagsS, "auth_immutable")
-        }
-
-        if f.IsAuthClawbackEnabled() {
-            clearFlags = append(clearFlags, int(xdr.AccountFlagsAuthClawbackEnabledFlag))
-            clearFlagsS = append(clearFlagsS, "auth_clawback_enabled")
-        }
-    }
-
-    return operations.SetOptions{
-        Base:          baseOp,
-        HomeDomain:    homeDomain,
-        InflationDest: inflationDest,
-
-        MasterKeyWeight: masterKeyWeight,
-        SignerKey:       signerKey,
-        SignerWeight:    signerWeight,
-
-        SetFlags:    setFlags,
-        SetFlagsS:   setFlagsS,
-        ClearFlags:  clearFlags,
-        ClearFlagsS: clearFlagsS,
-
-        LowThreshold:  lowThreshold,
-        MedThreshold:  medThreshold,
-        HighThreshold: highThreshold,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/set_trust_line_flags.go b/exp/lighthorizon/adapters/set_trust_line_flags.go
deleted file mode 100644
index 2969dcb2b5..0000000000
--- a/exp/lighthorizon/adapters/set_trust_line_flags.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package adapters
-
-import (
-    "github.com/stellar/go/exp/lighthorizon/common"
-    "github.com/stellar/go/protocols/horizon/base"
-    "github.com/stellar/go/protocols/horizon/operations"
-    "github.com/stellar/go/support/errors"
-    "github.com/stellar/go/xdr"
-)
-
-func populateSetTrustLineFlagsOperation(op *common.Operation, baseOp operations.Base) (operations.SetTrustLineFlags, error) {
-    setTrustLineFlags := op.Get().Body.MustSetTrustLineFlagsOp()
-
-    var (
-        assetType string
-        code      string
-        issuer    string
-    )
-    err := setTrustLineFlags.Asset.Extract(&assetType, &code, &issuer)
-    if err != nil {
-        return operations.SetTrustLineFlags{}, errors.Wrap(err, "xdr.Asset.Extract error")
-    }
-
-    var (
-        setFlags  []int
-        setFlagsS []string
-
-        clearFlags  []int
-        clearFlagsS []string
-    )
-
-    if setTrustLineFlags.SetFlags > 0 {
-        f := xdr.TrustLineFlags(setTrustLineFlags.SetFlags)
-
-        if f.IsAuthorized() {
-            setFlags = append(setFlags, int(xdr.TrustLineFlagsAuthorizedFlag))
-            setFlagsS = append(setFlagsS, "authorized")
-        }
-
-        if f.IsAuthorizedToMaintainLiabilitiesFlag() {
-            setFlags = append(setFlags, int(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag))
-            setFlagsS = append(setFlagsS, "authorized_to_maintain_liabilites")
-        }
-
-        if f.IsClawbackEnabledFlag() {
-            setFlags = append(setFlags, int(xdr.TrustLineFlagsTrustlineClawbackEnabledFlag))
-            setFlagsS = append(setFlagsS, "clawback_enabled")
-        }
-    }
-
-    if setTrustLineFlags.ClearFlags > 0 {
-        f := xdr.TrustLineFlags(setTrustLineFlags.ClearFlags)
-
-        if f.IsAuthorized() {
-            clearFlags = append(clearFlags, int(xdr.TrustLineFlagsAuthorizedFlag))
-            clearFlagsS = append(clearFlagsS, "authorized")
-        }
-
-        if f.IsAuthorizedToMaintainLiabilitiesFlag() {
-            clearFlags = append(clearFlags, int(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag))
-            clearFlagsS = append(clearFlagsS, "authorized_to_maintain_liabilites")
-        }
-
-        if f.IsClawbackEnabledFlag() {
-            clearFlags = append(clearFlags, int(xdr.TrustLineFlagsTrustlineClawbackEnabledFlag))
-            clearFlagsS = append(clearFlagsS, "clawback_enabled")
-        }
-    }
-
-    return operations.SetTrustLineFlags{
-        Base: baseOp,
-        Asset: base.Asset{
-            Type:   assetType,
-            Code:   code,
-            Issuer: issuer,
-        },
-        Trustor:     setTrustLineFlags.Trustor.Address(),
-        SetFlags:    setFlags,
-        SetFlagsS:   setFlagsS,
-        ClearFlags:  clearFlags,
-        ClearFlagsS: clearFlagsS,
-    }, nil
-}
diff --git a/exp/lighthorizon/adapters/testdata/transactions.json b/exp/lighthorizon/adapters/testdata/transactions.json
deleted file mode 100644
index 6128801533..0000000000
--- a/exp/lighthorizon/adapters/testdata/transactions.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
-  "_links": {
-    "self": {
-      "href": "https://horizon.stellar.org/accounts/GBFHFINUD6NVGSX33PY25DDRCABN3H2JTDMLUEXAUEJVV22HTXVGLEZD/transactions?cursor=179530990183178241\u0026limit=1\u0026order=desc"
-    },
-    "next": {
-      "href":
"https://horizon.stellar.org/accounts/GBFHFINUD6NVGSX33PY25DDRCABN3H2JTDMLUEXAUEJVV22HTXVGLEZD/transactions?cursor=179530990183174144\u0026limit=1\u0026order=desc" - }, - "prev": { - "href": "https://horizon.stellar.org/accounts/GBFHFINUD6NVGSX33PY25DDRCABN3H2JTDMLUEXAUEJVV22HTXVGLEZD/transactions?cursor=179530990183174144\u0026limit=1\u0026order=asc" - } - }, - "_embedded": { - "records": [ - { - "_links": { - "self": { - "href": "https://horizon.stellar.org/transactions/55d8aa3693489ffc1d70b8ba33b8b5c012ec098f6f104383e3f090048488febd" - }, - "account": { - "href": "https://horizon.stellar.org/accounts/GBFHFINUD6NVGSX33PY25DDRCABN3H2JTDMLUEXAUEJVV22HTXVGLEZD" - }, - "ledger": { - "href": "https://horizon.stellar.org/ledgers/41800316" - }, - "operations": { - "href": "https://horizon.stellar.org/transactions/55d8aa3693489ffc1d70b8ba33b8b5c012ec098f6f104383e3f090048488febd/operations{?cursor,limit,order}", - "templated": true - }, - "effects": { - "href": "https://horizon.stellar.org/transactions/55d8aa3693489ffc1d70b8ba33b8b5c012ec098f6f104383e3f090048488febd/effects{?cursor,limit,order}", - "templated": true - }, - "precedes": { - "href": "https://horizon.stellar.org/transactions?order=asc\u0026cursor=179530990183174144" - }, - "succeeds": { - "href": "https://horizon.stellar.org/transactions?order=desc\u0026cursor=179530990183174144" - }, - "transaction": { - "href": "https://horizon.stellar.org/transactions/55d8aa3693489ffc1d70b8ba33b8b5c012ec098f6f104383e3f090048488febd" - } - }, - "id": "55d8aa3693489ffc1d70b8ba33b8b5c012ec098f6f104383e3f090048488febd", - "paging_token": "179530990183174144", - "successful": true, - "hash": "55d8aa3693489ffc1d70b8ba33b8b5c012ec098f6f104383e3f090048488febd", - "ledger": 41800316, - "created_at": "2022-07-17T13:08:41Z", - "source_account": "GBFHFINUD6NVGSX33PY25DDRCABN3H2JTDMLUEXAUEJVV22HTXVGLEZD", - "source_account_sequence": "172589382434294350", - "fee_account": "GBFHFINUD6NVGSX33PY25DDRCABN3H2JTDMLUEXAUEJVV22HTXVGLEZD", - "fee_charged": "100", - "max_fee": "100000", - "operation_count": 1, - "envelope_xdr": "AAAAAgAAAABKcqG0H5tTSvvb8a6McRAC3Z9JmNi6EuChE1rrR53qZQABhqACZSkhAAAKTgAAAAAAAAAAAAAAAQAAAAEAAAAASnKhtB+bU0r72/GujHEQAt2fSZjYuhLgoRNa60ed6mUAAAANAAAAAXlYTE0AAAAAIjbXcP4NPgFSGXXVz3rEhCtwldaxqddo0+mmMumZBr4AAAACVAvkAAAAAABKcqG0H5tTSvvb8a6McRAC3Z9JmNi6EuChE1rrR53qZQAAAAJEUklGVAAAAAAAAAAAAAAAvSOzPqUOGnDIcJOm7T85qDFRM0wfOVoubgkEPk95DZ0AAAEQvqAGdQAAAAEAAAAAAAAAAAAAAAFHneplAAAAQAVm9muIrK31Z+m2ZvhDYhtuoHcc/n+MO0DOaiQjfW+tsUNVCOw7foHiDRVLBdAHBZT+xxa3F+Ek9wQiKzxtQQM=", - "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAIAAAABAAAAAPaTW9sBV2ja6yDUtPcpGpUrnVEHaTHC4I065TklIsguAAAAAD1H0goAAAAAAAAAAlQE8wsAAAABeVhMTQAAAAAiNtdw/g0+AVIZddXPesSEK3CV1rGp12jT6aYy6ZkGvgAAAAJUC+QAAAAAAgRnzllf8Sas0MUQlkxROsBgUzEoIN2XrYP9tlH5SINjAAAAAkRSSUZUAAAAAAAAAAAAAAC9I7M+pQ4acMhwk6btPzmoMVEzTB85Wi5uCQQ+T3kNnQAAARN/56NFAAAAAAAAAAJUBPMLAAAAAEpyobQfm1NK+9vxroxxEALdn0mY2LoS4KETWutHneplAAAAAkRSSUZUAAAAAAAAAAAAAAC9I7M+pQ4acMhwk6btPzmoMVEzTB85Wi5uCQQ+T3kNnQAAARN/56NFAAAAAA==", - "result_meta_xdr": 
"AAAAAgAAAAIAAAADAn3SfAAAAAAAAAAASnKhtB+bU0r72/GujHEQAt2fSZjYuhLgoRNa60ed6mUAAAAAENitnwJlKSEAAApNAAAACgAAAAEAAAAAxHHGQ3BiyVBqiTQuU4oa2kBNL0HPHTolX0Mh98bg4XUAAAAAAAAACWxvYnN0ci5jbwAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAwAAAAACfdJjAAAAAGLUCUkAAAAAAAAAAQJ90nwAAAAAAAAAAEpyobQfm1NK+9vxroxxEALdn0mY2LoS4KETWutHneplAAAAABDYrZ8CZSkhAAAKTgAAAAoAAAABAAAAAMRxxkNwYslQaok0LlOKGtpATS9Bzx06JV9DIffG4OF1AAAAAAAAAAlsb2JzdHIuY28AAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAMAAAAAAn3SfAAAAABi1AnZAAAAAAAAAAEAAAAMAAAAAwJ90nwAAAAAAAAAAPaTW9sBV2ja6yDUtPcpGpUrnVEHaTHC4I065TklIsguAAAAPP5dpSACFip8AC7CtgAAAAcAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAc2nqv7AAAADbUPYzLAAAAAgAAAAAAAAAAAAAAAAAAAAMAAAAAAn3SegAAAABi1AnNAAAAAAAAAAECfdJ8AAAAAAAAAAD2k1vbAVdo2usg1LT3KRqVK51RB2kxwuCNOuU5JSLILgAAADqqWLIVAhYqfAAuwrYAAAAHAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAHNp6r+wAAAA0gDiZwAAAAAIAAAAAAAAAAAAAAAAAAAADAAAAAAJ90noAAAAAYtQJzQAAAAAAAAADAn3SUAAAAAUEZ85ZX/EmrNDFEJZMUTrAYFMxKCDdl62D/bZR+UiDYwAAAAAAAAAAAAAAAkRSSUZUAAAAAAAAAAAAAAC9I7M+pQ4acMhwk6btPzmoMVEzTB85Wi5uCQQ+T3kNnQAAAB4AAAIEaTMNuAAA8H8XoYXHAAAUwrrMCE0AAAAAAAAAaAAAAAAAAAABAn3SfAAAAAUEZ85ZX/EmrNDFEJZMUTrAYFMxKCDdl62D/bZR+UiDYwAAAAAAAAAAAAAAAkRSSUZUAAAAAAAAAAAAAAC9I7M+pQ4acMhwk6btPzmoMVEzTB85Wi5uCQQ+T3kNnQAAAB4AAAIGvTgAwwAA72uXueKCAAAUwrrMCE0AAAAAAAAAaAAAAAAAAAADAn3SYwAAAAEAAAAASnKhtB+bU0r72/GujHEQAt2fSZjYuhLgoRNa60ed6mUAAAACRFJJRlQAAAAAAAAAAAAAAL0jsz6lDhpwyHCTpu0/OagxUTNMHzlaLm4JBD5PeQ2dAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAECfdJ8AAAAAQAAAABKcqG0H5tTSvvb8a6McRAC3Z9JmNi6EuChE1rrR53qZQAAAAJEUklGVAAAAAAAAAAAAAAAvSOzPqUOGnDIcJOm7T85qDFRM0wfOVoubgkEPk95DZ0AAAETf+ejRX//////////AAAAAQAAAAAAAAAAAAAAAwJ90nwAAAABAAAAAPaTW9sBV2ja6yDUtPcpGpUrnVEHaTHC4I065TklIsguAAAAAXlYTE0AAAAAIjbXcP4NPgFSGXXVz3rEhCtwldaxqddo0+mmMumZBr4AAAAggUA/Y3//////////AAAAAQAAAAEAAAA21OED/gAAABzamxRcAAAAAAAAAAAAAAABAn3SfAAAAAEAAAAA9pNb2wFXaNrrINS09ykalSudUQdpMcLgjTrlOSUiyC4AAAABeVhMTQAAAAAiNtdw/g0+AVIZddXPesSEK3CV1rGp12jT6aYy6ZkGvgAAACLVTCNjf/////////8AAAABAAAAAQAAADSA1R//AAAAHNqbFFwAAAAAAAAAAAAAAAMCfdJ8AAAAAgAAAAD2k1vbAVdo2usg1LT3KRqVK51RB2kxwuCNOuU5JSLILgAAAAA9R9IKAAAAAAAAAAF5WExNAAAAACI213D+DT4BUhl11c96xIQrcJXWsanXaNPppjLpmQa+AAAANtQ9jMt7hXu5e4QLegAAAAAAAAAAAAAAAAAAAAECfdJ8AAAAAgAAAAD2k1vbAVdo2usg1LT3KRqVK51RB2kxwuCNOuU5JSLILgAAAAA9R9IKAAAAAAAAAAF5WExNAAAAACI213D+DT4BUhl11c96xIQrcJXWsanXaNPppjLpmQa+AAAANIA4mcB7hXu5e4QLegAAAAAAAAAAAAAAAAAAAAMCfcCZAAAAAQAAAABKcqG0H5tTSvvb8a6McRAC3Z9JmNi6EuChE1rrR53qZQAAAAF5WExNAAAAACI213D+DT4BUhl11c96xIQrcJXWsanXaNPppjLpmQa+AAAAEqEyDdl//////////wAAAAEAAAAAAAAAAAAAAAECfdJ8AAAAAQAAAABKcqG0H5tTSvvb8a6McRAC3Z9JmNi6EuChE1rrR53qZQAAAAF5WExNAAAAACI213D+DT4BUhl11c96xIQrcJXWsanXaNPppjLpmQa+AAAAEE0mKdl//////////wAAAAEAAAAAAAAAAAAAAAA=", - "fee_meta_xdr": "AAAAAgAAAAMCfdJjAAAAAAAAAABKcqG0H5tTSvvb8a6McRAC3Z9JmNi6EuChE1rrR53qZQAAAAAQ2K4DAmUpIQAACk0AAAAKAAAAAQAAAADEccZDcGLJUGqJNC5TihraQE0vQc8dOiVfQyH3xuDhdQAAAAAAAAAJbG9ic3RyLmNvAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAADAAAAAAJ90mMAAAAAYtQJSQAAAAAAAAABAn3SfAAAAAAAAAAASnKhtB+bU0r72/GujHEQAt2fSZjYuhLgoRNa60ed6mUAAAAAENitnwJlKSEAAApNAAAACgAAAAEAAAAAxHHGQ3BiyVBqiTQuU4oa2kBNL0HPHTolX0Mh98bg4XUAAAAAAAAACWxvYnN0ci5jbwAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAwAAAAACfdJjAAAAAGLUCUkAAAAA", - "memo_type": "none", - "signatures": [ - "BWb2a4isrfVn6bZm+ENiG26gdxz+f4w7QM5qJCN9b62xQ1UI7Dt+geINFUsF0AcFlP7HFrcX4ST3BCIrPG1BAw==" - ] - } - ] - } -} \ No newline at end of file diff --git a/exp/lighthorizon/adapters/transaction.go b/exp/lighthorizon/adapters/transaction.go 
deleted file mode 100644 index 6942668c8d..0000000000 --- a/exp/lighthorizon/adapters/transaction.go +++ /dev/null @@ -1,295 +0,0 @@ -package adapters - -import ( - "bytes" - "encoding/base64" - "encoding/hex" - "fmt" - "net/url" - "strconv" - "strings" - "time" - "unicode/utf8" - - "github.com/stellar/go/exp/lighthorizon/common" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/network" - protocol "github.com/stellar/go/protocols/horizon" - "github.com/stellar/go/support/render/hal" - "github.com/stellar/go/xdr" - "golang.org/x/exp/constraints" -) - -// PopulateTransaction converts between ingested XDR and RESTful JSON. In -// Horizon Classic, the data goes from Captive Core -> DB -> JSON. In our case, -// there's no DB intermediary, so we need to directly translate. -func PopulateTransaction( - baseUrl *url.URL, - tx *common.Transaction, - encoder *xdr.EncodingBuffer, -) (dest protocol.Transaction, err error) { - txHash, err := tx.TransactionHash() - if err != nil { - return - } - - dest.ID = txHash - dest.Successful = tx.Result.Successful() - dest.Hash = txHash - dest.Ledger = int32(tx.LedgerHeader.LedgerSeq) - dest.LedgerCloseTime = time.Unix(int64(tx.LedgerHeader.ScpValue.CloseTime), 0).UTC() - - source := tx.SourceAccount() - dest.Account = source.ToAccountId().Address() - if _, ok := source.GetMed25519(); ok { - dest.AccountMuxed, err = source.GetAddress() - if err != nil { - return - } - dest.AccountMuxedID, err = source.GetId() - if err != nil { - return - } - } - dest.AccountSequence = tx.Envelope.SeqNum() - - envelopeBase64, err := encoder.MarshalBase64(tx.Envelope) - if err != nil { - return - } - resultBase64, err := encoder.MarshalBase64(&tx.Result.Result) - if err != nil { - return - } - metaBase64, err := encoder.MarshalBase64(tx.UnsafeMeta) - if err != nil { - return - } - feeMetaBase64, err := encoder.MarshalBase64(tx.FeeChanges) - if err != nil { - return - } - - dest.OperationCount = int32(len(tx.Envelope.Operations())) - dest.EnvelopeXdr = envelopeBase64 - dest.ResultXdr = resultBase64 - dest.ResultMetaXdr = metaBase64 - dest.FeeMetaXdr = feeMetaBase64 - dest.MemoType = memoType(*tx.LedgerTransaction) - if m, ok := memo(*tx.LedgerTransaction); ok { - dest.Memo = m - if dest.MemoType == "text" { - var mb string - if mb, err = memoBytes(envelopeBase64); err != nil { - return - } else { - dest.MemoBytes = mb - } - } - } - - dest.Signatures = signatures(tx.Envelope.Signatures()) - - // If we never use this, we'll remove it later. This just defends us against - // nil dereferences. 
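The deleted `PopulateTransaction` above translates ingested XDR directly into the Horizon JSON shape, with no database in between. As a minimal, hypothetical sketch of that direct-translation idea (not part of the deleted code), the snippet below decodes a base64 `TransactionEnvelope` and reads off the same basic fields the adapter maps onto `protocol.Transaction`; the envelope string is a truncated placeholder, not a value from this diff:

```go
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	// Placeholder: any base64-encoded TransactionEnvelope works here,
	// e.g. an envelope_xdr value pulled from a Horizon response.
	envelopeB64 := "AAAAAg..."

	var env xdr.TransactionEnvelope
	if err := xdr.SafeUnmarshalBase64(envelopeB64, &env); err != nil {
		panic(err) // the truncated placeholder above will fail here
	}

	// The same accessors the adapter uses, straight off the XDR.
	fmt.Println("source:", env.SourceAccount().ToAccountId().Address())
	fmt.Println("seq:   ", env.SeqNum())
	fmt.Println("ops:   ", len(env.Operations()))
}
```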
- dest.Preconditions = &protocol.TransactionPreconditions{} - - if tb := tx.Envelope.Preconditions().TimeBounds; tb != nil { - dest.Preconditions.TimeBounds = &protocol.TransactionPreconditionsTimebounds{ - MaxTime: formatTime(tb.MaxTime), - MinTime: formatTime(tb.MinTime), - } - } - - if lb := tx.Envelope.LedgerBounds(); lb != nil { - dest.Preconditions.LedgerBounds = &protocol.TransactionPreconditionsLedgerbounds{ - MinLedger: uint32(lb.MinLedger), - MaxLedger: uint32(lb.MaxLedger), - } - } - - if minSeq := tx.Envelope.MinSeqNum(); minSeq != nil { - dest.Preconditions.MinAccountSequence = fmt.Sprint(*minSeq) - } - - if minSeqAge := tx.Envelope.MinSeqAge(); minSeqAge != nil && *minSeqAge > 0 { - dest.Preconditions.MinAccountSequenceAge = formatTime(*minSeqAge) - } - - if minSeqGap := tx.Envelope.MinSeqLedgerGap(); minSeqGap != nil { - dest.Preconditions.MinAccountSequenceLedgerGap = uint32(*minSeqGap) - } - - if signers := tx.Envelope.ExtraSigners(); len(signers) > 0 { - dest.Preconditions.ExtraSigners = formatSigners(signers) - } - - if tx.Envelope.IsFeeBump() { - innerTx, ok := tx.Envelope.FeeBump.Tx.InnerTx.GetV1() - if !ok { - panic("Failed to parse inner transaction from fee-bump tx.") - } - - var rawInnerHash [32]byte - rawInnerHash, err = network.HashTransaction(innerTx.Tx, tx.NetworkPassphrase) - if err != nil { - return - } - innerHash := hex.EncodeToString(rawInnerHash[:]) - - feeAccountMuxed := tx.Envelope.FeeBumpAccount() - dest.FeeAccount = feeAccountMuxed.ToAccountId().Address() - if _, ok := feeAccountMuxed.GetMed25519(); ok { - dest.FeeAccountMuxed, err = feeAccountMuxed.GetAddress() - if err != nil { - return - } - dest.FeeAccountMuxedID, err = feeAccountMuxed.GetId() - if err != nil { - return - } - } - - dest.MaxFee = tx.Envelope.FeeBumpFee() - dest.FeeBumpTransaction = &protocol.FeeBumpTransaction{ - Hash: txHash, - Signatures: signatures(tx.Envelope.FeeBumpSignatures()), - } - dest.InnerTransaction = &protocol.InnerTransaction{ - Hash: innerHash, - MaxFee: int64(innerTx.Tx.Fee), - Signatures: signatures(tx.Envelope.Signatures()), - } - // TODO: Figure out what this means? Maybe @tamirms knows. - // if transactionHash != row.TransactionHash { - // dest.Signatures = dest.InnerTransaction.Signatures - // } - } else { - dest.FeeAccount = dest.Account - dest.FeeAccountMuxed = dest.AccountMuxed - dest.FeeAccountMuxedID = dest.AccountMuxedID - dest.MaxFee = int64(tx.Envelope.Fee()) - } - dest.FeeCharged = int64(tx.Result.Result.FeeCharged) - - lb := hal.LinkBuilder{Base: baseUrl} - dest.PT = strconv.FormatUint(uint64(tx.TOID()), 10) - dest.Links.Account = lb.Link("/accounts", dest.Account) - dest.Links.Ledger = lb.Link("/ledgers", fmt.Sprint(dest.Ledger)) - dest.Links.Operations = lb.PagedLink("/transactions", dest.ID, "operations") - dest.Links.Effects = lb.PagedLink("/transactions", dest.ID, "effects") - dest.Links.Self = lb.Link("/transactions", dest.ID) - dest.Links.Transaction = dest.Links.Self - dest.Links.Succeeds = lb.Linkf("/transactions?order=desc&cursor=%s", dest.PT) - dest.Links.Precedes = lb.Linkf("/transactions?order=asc&cursor=%s", dest.PT) - - // If we didn't need the structure, drop it. 
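The precondition mapping above only keeps the `Preconditions` struct when the envelope actually carries one. A hedged sketch of that guard as a standalone helper, using the same envelope accessors as the deleted code and mirroring the `common.Transaction.HasPreconditions` helper defined later in this diff (the function name here is illustrative):

```go
// hasAnyPreconditions reports whether an envelope carries any of the
// preconditions that the adapter would serialize.
func hasAnyPreconditions(env xdr.TransactionEnvelope) bool {
	if env.Preconditions().TimeBounds != nil {
		return true
	}
	if env.LedgerBounds() != nil || env.MinSeqNum() != nil {
		return true
	}
	if age := env.MinSeqAge(); age != nil && *age > 0 {
		return true
	}
	if gap := env.MinSeqLedgerGap(); gap != nil && *gap > 0 {
		return true
	}
	return len(env.ExtraSigners()) > 0
}
```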
- if !tx.HasPreconditions() { - dest.Preconditions = nil - } - - return -} - -func formatSigners(s []xdr.SignerKey) []string { - if s == nil { - return nil - } - - signers := make([]string, len(s)) - for i, key := range s { - signers[i] = key.Address() - } - return signers -} - -func signatures(xdrSignatures []xdr.DecoratedSignature) []string { - signatures := make([]string, len(xdrSignatures)) - for i, sig := range xdrSignatures { - signatures[i] = base64.StdEncoding.EncodeToString(sig.Signature) - } - return signatures -} - -func memoType(tx ingester.LedgerTransaction) string { - switch tx.Envelope.Memo().Type { - case xdr.MemoTypeMemoNone: - return "none" - case xdr.MemoTypeMemoText: - return "text" - case xdr.MemoTypeMemoId: - return "id" - case xdr.MemoTypeMemoHash: - return "hash" - case xdr.MemoTypeMemoReturn: - return "return" - default: - panic(fmt.Errorf("invalid memo type: %v", tx.Envelope.Memo().Type)) - } -} - -func memo(tx ingester.LedgerTransaction) (value string, valid bool) { - valid = true - memo := tx.Envelope.Memo() - - switch memo.Type { - case xdr.MemoTypeMemoNone: - value, valid = "", false - - case xdr.MemoTypeMemoText: - scrubbed := scrub(memo.MustText()) - notnull := strings.Join(strings.Split(scrubbed, "\x00"), "") - value = notnull - - case xdr.MemoTypeMemoId: - value = fmt.Sprintf("%d", memo.MustId()) - - case xdr.MemoTypeMemoHash: - hash := memo.MustHash() - value = base64.StdEncoding.EncodeToString(hash[:]) - - case xdr.MemoTypeMemoReturn: - hash := memo.MustRetHash() - value = base64.StdEncoding.EncodeToString(hash[:]) - - default: - panic(fmt.Errorf("invalid memo type: %v", memo.Type)) - } - - return -} - -func memoBytes(envelopeXDR string) (string, error) { - var parsedEnvelope xdr.TransactionEnvelope - if err := xdr.SafeUnmarshalBase64(envelopeXDR, &parsedEnvelope); err != nil { - return "", err - } - - memo := *parsedEnvelope.Memo().Text - return base64.StdEncoding.EncodeToString([]byte(memo)), nil -} - -// scrub ensures that a given string is valid utf-8, replacing any invalid byte -// sequences with the utf-8 replacement character. 
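To make the scrubbing behavior documented above concrete, here is a small, self-contained illustration (stdlib only, independent of the deleted code): `utf8.DecodeRuneInString` yields `utf8.RuneError` for an invalid byte, so re-encoding the decoded runes replaces bad sequences with U+FFFD:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	in := string([]byte{'o', 'k', 0xff, '!'}) // 0xff is never valid UTF-8
	fmt.Println(utf8.ValidString(in))         // false

	var out []rune
	for i := 0; i < len(in); {
		r, w := utf8.DecodeRuneInString(in[i:]) // invalid byte -> (RuneError, 1)
		out = append(out, r)
		i += w
	}
	fmt.Println(string(out)) // "ok?!" with U+FFFD as the middle rune
}
```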
-func scrub(in string) string { - // First check validity using the stdlib, returning if the string is already - // valid - if utf8.ValidString(in) { - return in - } - - left := []byte(in) - var result bytes.Buffer - - for len(left) > 0 { - r, n := utf8.DecodeRune(left) - result.WriteRune(r) // never errors, only panics - left = left[n:] - } - - return result.String() -} - -func formatTime[T constraints.Integer](t T) string { - return strconv.FormatUint(uint64(t), 10) -} diff --git a/exp/lighthorizon/adapters/transaction_test.go b/exp/lighthorizon/adapters/transaction_test.go deleted file mode 100644 index 5a8ba4ab80..0000000000 --- a/exp/lighthorizon/adapters/transaction_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package adapters - -import ( - "encoding/json" - "net/url" - "os" - "path/filepath" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/stellar/go/exp/lighthorizon/common" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/ingest" - "github.com/stellar/go/network" - protocol "github.com/stellar/go/protocols/horizon" - "github.com/stellar/go/toid" - "github.com/stellar/go/xdr" -) - -// TestTransactionAdapter confirms that the adapter correctly serializes a -// transaction to JSON by actually pulling a transaction from the -// known-to-be-true horizon.stellar.org, turning it into an "ingested" -// transaction, and serializing it. -func TestTransactionAdapter(t *testing.T) { - f, err := os.Open(filepath.Join("./testdata", "transactions.json")) - require.NoErrorf(t, err, "are fixtures missing?") - - page := protocol.TransactionsPage{} - decoder := json.NewDecoder(f) - require.NoError(t, decoder.Decode(&page)) - require.Len(t, page.Embedded.Records, 1) - expectedTx := page.Embedded.Records[0] - - parsedUrl, err := url.Parse(page.Links.Self.Href) - require.NoError(t, err) - parsedToid, err := strconv.ParseInt(expectedTx.PagingToken(), 10, 64) - require.NoError(t, err) - expectedTxIndex := toid.Parse(parsedToid).TransactionOrder - - txEnv := xdr.TransactionEnvelope{} - txResult := xdr.TransactionResult{} - txMeta := xdr.TransactionMeta{} - txFeeMeta := xdr.LedgerEntryChanges{} - - require.NoError(t, xdr.SafeUnmarshalBase64(expectedTx.EnvelopeXdr, &txEnv)) - require.NoError(t, xdr.SafeUnmarshalBase64(expectedTx.ResultMetaXdr, &txMeta)) - require.NoError(t, xdr.SafeUnmarshalBase64(expectedTx.ResultXdr, &txResult)) - require.NoError(t, xdr.SafeUnmarshalBase64(expectedTx.FeeMetaXdr, &txFeeMeta)) - - closeTimestamp := expectedTx.LedgerCloseTime.UTC().Unix() - - tx := common.Transaction{ - LedgerTransaction: &ingester.LedgerTransaction{ - LedgerTransaction: &ingest.LedgerTransaction{ - Index: 0, - Envelope: txEnv, - Result: xdr.TransactionResultPair{ - TransactionHash: xdr.Hash{}, - Result: txResult, - }, - FeeChanges: txFeeMeta, - UnsafeMeta: txMeta, - }, - }, - LedgerHeader: &xdr.LedgerHeader{ - LedgerSeq: xdr.Uint32(expectedTx.Ledger), - ScpValue: xdr.StellarValue{ - CloseTime: xdr.TimePoint(closeTimestamp), - }, - }, - TxIndex: expectedTxIndex - 1, // TOIDs have a 1-based index - NetworkPassphrase: network.PublicNetworkPassphrase, - } - - result, err := PopulateTransaction(parsedUrl, &tx, xdr.NewEncodingBuffer()) - require.NoError(t, err) - assert.Equal(t, expectedTx, result) -} diff --git a/exp/lighthorizon/build/README.md b/exp/lighthorizon/build/README.md deleted file mode 100644 index d9dcf4556d..0000000000 --- a/exp/lighthorizon/build/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Light Horizon 
services deployment - -Light Horizon is composed of a few microservices: -* index-batch - contains the map and reduce binaries to parallelize tx-meta reads and index writes. -* index-single - contains a single binary that reads tx-meta and writes indexes. -* ledgerexporter - contains a single binary that reads from captive core and writes tx-meta. -* web - contains a single binary that runs the web API, which reads from tx-meta and indexes. - -See [godoc](https://godoc.org/github.com/stellar/go/exp/lighthorizon) for details on each service. - -## Building Docker images of each service -Each service is packaged into a Docker image; use the helper script included here to build: -`./build.sh <module> <docker_repo_prefix> <tag> <push:true|false>` - -Example: build just the `mydockerhubname/lighthorizon-index-single:latest` image into the local Docker image store, with no push to a registry: -`./build.sh index-single mydockerhubname latest false` - -Example: build images for all the services and push them to `mydockerhubname/lighthorizon-<service>:testversion`: -`./build.sh all mydockerhubname testversion true` - -## Deploy service images on Kubernetes (k8s) -* `k8s/ledgerexporter.yml` - creates a deployment with the ledgerexporter image and supporting resources, such as a configmap, secret, and pvc for captive core on-disk storage. Review the settings to confirm they work in your environment before deployment. -* `k8s/lighthorizon_index.yml` - creates a deployment with the index-single image and supporting resources, such as a configmap and secret. Review the settings to confirm they work in your environment before deployment. -* `k8s/lighthorizon_web.yml` - creates a deployment with the web image and supporting resources, such as a configmap and ingress rule. Review the settings to confirm they work in your environment before deployment. diff --git a/exp/lighthorizon/build/build.sh b/exp/lighthorizon/build/build.sh deleted file mode 100755 index e884fc4914..0000000000 --- a/exp/lighthorizon/build/build.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -e - -# Move to repo root -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$DIR/../../.." -# module name is the sub-folder name under ./build -MODULE=$1 -DOCKER_REPO_PREFIX=$2 -DOCKER_TAG=$3 -DOCKER_PUSH=$4 - -if [ -z "$MODULE" ] ||\ - [ -z "$DOCKER_REPO_PREFIX" ] ||\ - [ -z "$DOCKER_TAG" ] ||\ - [ -z "$DOCKER_PUSH" ]; then - echo "invalid parameters, requires './build.sh <module> <docker_repo_prefix> <tag> <push:true|false>'" - exit 1 -fi - -build_target () { - DOCKER_LABEL="$DOCKER_REPO_PREFIX"/lighthorizon-"$MODULE":"$DOCKER_TAG" - docker build --tag $DOCKER_LABEL --platform linux/amd64 -f "exp/lighthorizon/build/$MODULE/Dockerfile" . - if [ "$DOCKER_PUSH" == "true" ]; then - docker push $DOCKER_LABEL - fi -} - -case $MODULE in -index-batch) - build_target - ;; -ledgerexporter) - build_target - ;; -index-single) - build_target - ;; -web) - build_target - ;; -all) - MODULE=index-batch - build_target - MODULE=web - build_target - MODULE=index-single - build_target - MODULE=ledgerexporter - build_target - ;; -*) - echo "unknown MODULE build parameter ('$MODULE'), must be one of all|index-batch|web|index-single|ledgerexporter" - exit 1 - ;; -esac - diff --git a/exp/lighthorizon/build/index-batch/Dockerfile b/exp/lighthorizon/build/index-batch/Dockerfile deleted file mode 100644 index 1780df682f..0000000000 --- a/exp/lighthorizon/build/index-batch/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM golang:1.20 AS builder - -WORKDIR /go/src/github.com/stellar/go -COPY . 
./ -RUN go mod download -RUN go install github.com/stellar/go/exp/lighthorizon/index/cmd/batch/map -RUN go install github.com/stellar/go/exp/lighthorizon/index/cmd/batch/reduce - -FROM ubuntu:22.04 -ENV DEBIAN_FRONTEND=noninteractive -# ca-certificates are required to make tls connections -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils -RUN apt-get clean - -COPY --from=builder /go/src/github.com/stellar/go/exp/lighthorizon/build/index-batch/start ./ -COPY --from=builder /go/bin/map ./ -COPY --from=builder /go/bin/reduce ./ -RUN ["chmod", "+x", "/start"] - -ENTRYPOINT ["/start"] diff --git a/exp/lighthorizon/build/index-batch/README.md b/exp/lighthorizon/build/index-batch/README.md deleted file mode 100644 index c300066536..0000000000 --- a/exp/lighthorizon/build/index-batch/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# `stellar/lighthorizon-index-batch` - -This Docker image contains the ledger/checkpoint indexing executables. It allows running multiple instances of `map`/`reduce` on a single machine or running it in [AWS Batch](https://aws.amazon.com/batch/). - -## Env variables - -See the [package documentation](../../index/cmd/batch/doc.go) for more details. diff --git a/exp/lighthorizon/build/index-batch/start b/exp/lighthorizon/build/index-batch/start deleted file mode 100644 index 88fb5335fb..0000000000 --- a/exp/lighthorizon/build/index-batch/start +++ /dev/null @@ -1,17 +0,0 @@ -#! /usr/bin/env bash -set -e - -# RUN_MODE must be set to 'map' or 'reduce' - -export TRACY_NO_INVARIANT_CHECK=1 -NETWORK_PASSPHRASE="${NETWORK_PASSPHRASE:=Public Global Stellar Network ; September 2015}" -if [ "$RUN_MODE" == "reduce" ]; then - echo "Running Reduce, REDUCE JOBS: $REDUCE_JOB_COUNT MAP JOBS: $MAP_JOB_COUNT TARGET INDEX: $INDEX_TARGET" - /reduce -elif [ "$RUN_MODE" == "map" ]; then - echo "Running Map, TARGET INDEX: $INDEX_TARGET FIRST CHECKPOINT: $FIRST_CHECKPOINT" - /map -else - echo "error: undefined RUN_MODE env variable ('$RUN_MODE'), must be 'map' or 'reduce'" - exit 1 -fi diff --git a/exp/lighthorizon/build/index-single/Dockerfile b/exp/lighthorizon/build/index-single/Dockerfile deleted file mode 100644 index 1473f59f5c..0000000000 --- a/exp/lighthorizon/build/index-single/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM golang:1.20 AS builder - -WORKDIR /go/src/github.com/stellar/go -COPY . ./ -RUN go mod download -RUN go install github.com/stellar/go/exp/lighthorizon/index/cmd/single - -FROM ubuntu:22.04 - -ENV DEBIAN_FRONTEND=noninteractive -# ca-certificates are required to make tls connections -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils -RUN apt-get clean - -COPY --from=builder /go/bin/single ./ - -ENTRYPOINT ./single \ - -source "$TXMETA_SOURCE" \ - -target "$INDEXES_SOURCE" \ - -network-passphrase "$NETWORK_PASSPHRASE" \ - -start "$START" \ - -end "$END" \ - -modules "$MODULES" \ - -watch="$WATCH" \ - -workers "$WORKERS" diff --git a/exp/lighthorizon/build/k8s/ledgerexporter.yml b/exp/lighthorizon/build/k8s/ledgerexporter.yml deleted file mode 100644 index 290dd85c63..0000000000 --- a/exp/lighthorizon/build/k8s/ledgerexporter.yml +++ /dev/null @@ -1,125 +0,0 @@ -# this file contains the ledgerexporter deployment and its config artifacts. -# -# when applying the manifest on a cluster, make sure to include a namespace destination, -# as the manifest does not specify one; otherwise it'll go into your current kubectl context.
-# -# make sure to set the secret values, substituting the placeholders. -# -# $ kubectl apply -f ledgerexporter.yml -n horizon-dev -apiVersion: v1 -kind: ConfigMap -metadata: - annotations: - fluxcd.io/ignore: "true" - labels: - app: ledgerexporter - name: ledgerexporter-pubnet-env -data: - # when using core 'on disk', the earliest ledger streamed out after catching up to 2 is 3, - # whereas in-memory mode streams out 2; adjusted here, otherwise horizon ingest will abort - # and stop the process with an error that ledger 3 is not <= the expected ledger of 2. - START: "0" - END: "0" - - # can only have CONTINUE or START set, not both. - CONTINUE: "true" - WRITE_LATEST_PATH: "true" - CAPTIVE_CORE_USE_DB: "true" - - # configure the network to export - HISTORY_ARCHIVE_URLS: "https://history.stellar.org/prd/core-live/core_live_001,https://history.stellar.org/prd/core-live/core_live_002,https://history.stellar.org/prd/core-live/core_live_003" - NETWORK_PASSPHRASE: "Public Global Stellar Network ; September 2015" - # can refer to the canned configs for pubnet and testnet which are included on the image, - # `/captive-core-pubnet.cfg` or `/captive-core-testnet.cfg`. - # If exporting a standalone network, then mount a volume to the pod container with your standalone core's .cfg, - # and set the full path to that volume here - CAPTIVE_CORE_CONFIG: "/captive-core-pubnet.cfg" - - # example of testnet network config. - # HISTORY_ARCHIVE_URLS: "https://history.stellar.org/prd/core-testnet/core_testnet_001,https://history.stellar.org/prd/core-testnet/core_testnet_002" - # NETWORK_PASSPHRASE: "Test SDF Network ; September 2015" - # CAPTIVE_CORE_CONFIG: "/captive-core-testnet.cfg" - - # provide the url for the external s3 bucket to be populated - # update the ledgerexporter-pubnet-secret to have the correct aws key/secret for access to the bucket - ARCHIVE_TARGET: "s3://horizon-ledgermeta-prodnet-test" ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - app: ledgerexporter - name: ledgerexporter-pubnet-secret -type: Opaque -data: - AWS_REGION: - AWS_ACCESS_KEY_ID: - AWS_SECRET_ACCESS_KEY: ---- -# running captive core with on-disk mode limits RAM to around 2G usage, but -# requires some dedicated disk storage space that has at least 3k IOPS for read/write.
-apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ledgerexporter-pubnet-core-storage -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 500Gi - storageClassName: default - volumeMode: Filesystem ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - fluxcd.io/ignore: "true" - deployment.kubernetes.io/revision: "3" - labels: - app: ledgerexporter-pubnet - name: ledgerexporter-pubnet-deployment -spec: - selector: - matchLabels: - app: ledgerexporter-pubnet - replicas: 1 - template: - metadata: - annotations: - fluxcd.io/ignore: "true" - # if we expect to add metrics at some point to ledgerexporter - # this just needs to be set to true - prometheus.io/port: "6060" - prometheus.io/scrape: "false" - labels: - app: ledgerexporter-pubnet - spec: - containers: - - envFrom: - - secretRef: - name: ledgerexporter-pubnet-secret - - configMapRef: - name: ledgerexporter-pubnet-env - image: stellar/lighthorizon-ledgerexporter:latest - imagePullPolicy: Always - name: ledgerexporter-pubnet - resources: - limits: - cpu: 3 - memory: 8Gi - requests: - cpu: 500m - memory: 2Gi - volumeMounts: - - mountPath: /cc - name: core-storage - dnsPolicy: ClusterFirst - volumes: - - name: core-storage - persistentVolumeClaim: - claimName: ledgerexporter-pubnet-core-storage - - - diff --git a/exp/lighthorizon/build/k8s/lighthorizon_batch_map_job.yml b/exp/lighthorizon/build/k8s/lighthorizon_batch_map_job.yml deleted file mode 100644 index a2671b66c1..0000000000 --- a/exp/lighthorizon/build/k8s/lighthorizon_batch_map_job.yml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: 'batch-map-job' -spec: - completions: 52 - parallelism: 10 - completionMode: Indexed - template: - spec: - restartPolicy: Never - containers: - - name: 'worker' - image: 'stellar/lighthorizon-index-batch' - imagePullPolicy: Always - envFrom: - - secretRef: - name: - env: - - name: RUN_MODE - value: "map" - - name: BATCH_SIZE - value: "10048" - - name: FIRST_CHECKPOINT - value: "41426080" - - name: WORKER_COUNT - value: "8" - - name: TXMETA_SOURCE - value: "" - - name: JOB_INDEX_ENV - value: "JOB_COMPLETION_INDEX" - - name: NETWORK_PASSPHRASE - value: "pubnet" - - name: INDEX_TARGET - value: "url of target index" - resources: - limits: - cpu: 4 - memory: 5Gi - requests: - cpu: 500m - memory: 500Mi - \ No newline at end of file diff --git a/exp/lighthorizon/build/k8s/lighthorizon_batch_reduce_job.yml b/exp/lighthorizon/build/k8s/lighthorizon_batch_reduce_job.yml deleted file mode 100644 index 1bc9cb7f6c..0000000000 --- a/exp/lighthorizon/build/k8s/lighthorizon_batch_reduce_job.yml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: 'batch-reduce-job' -spec: - completions: 52 - parallelism: 10 - completionMode: Indexed - template: - spec: - restartPolicy: Never - containers: - - name: 'worker' - image: 'stellar/lighthorizon-index-batch' - imagePullPolicy: Always - envFrom: - - secretRef: - name: - env: - - name: RUN_MODE - value: "reduce" - - name: MAP_JOB_COUNT - value: "52" - - name: REDUCE_JOB_COUNT - value: "52" - - name: WORKER_COUNT - value: "8" - - name: INDEX_SOURCE_ROOT - value: "" - - name: JOB_INDEX_ENV - value: JOB_COMPLETION_INDEX - - name: INDEX_TARGET - value: "" - resources: - limits: - cpu: 4 - memory: 5Gi - requests: - cpu: 500m - memory: 500Mi - - \ No newline at end of file diff --git a/exp/lighthorizon/build/k8s/lighthorizon_index.yml b/exp/lighthorizon/build/k8s/lighthorizon_index.yml deleted file mode 100644 index 
1e7931fb2a..0000000000 --- a/exp/lighthorizon/build/k8s/lighthorizon_index.yml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - annotations: - fluxcd.io/ignore: "true" - labels: - app: lighthorizon-pubnet-index - name: lighthorizon-pubnet-index-env -data: - TXMETA_SOURCE: "s3://horizon-ledgermeta-prodnet-test" - INDEXES_SOURCE: "s3://horizon-index-prodnet-test" - NETWORK_PASSPHRASE: "Public Global Stellar Network ; September 2015" - START: "41809728" - END: "0" - WATCH: "true" - MODULES: "accounts" - WORKERS: "3" ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - app: lighthorizon-pubnet-index - name: lighthorizon-pubnet-index-secret -type: Opaque -data: - AWS_REGION: - AWS_ACCESS_KEY_ID: - AWS_SECRET_ACCESS_KEY: ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - fluxcd.io/ignore: "true" - labels: - app: lighthorizon-pubnet-index - name: lighthorizon-pubnet-index -spec: - replicas: 1 - selector: - matchLabels: - app: lighthorizon-pubnet-index - template: - metadata: - annotations: - fluxcd.io/ignore: "true" - prometheus.io/port: "6060" - prometheus.io/scrape: "false" - labels: - app: lighthorizon-pubnet-index - spec: - containers: - - envFrom: - - secretRef: - name: lighthorizon-pubnet-index-secret - - configMapRef: - name: lighthorizon-pubnet-index-env - image: stellar/lighthorizon-index-single:latest - imagePullPolicy: Always - name: index - ports: - - containerPort: 6060 - name: metrics - protocol: TCP - resources: - limits: - cpu: 3 - memory: 6Gi - requests: - cpu: 500m - memory: 1Gi - - \ No newline at end of file diff --git a/exp/lighthorizon/build/k8s/lighthorizon_web.yml b/exp/lighthorizon/build/k8s/lighthorizon_web.yml deleted file mode 100644 index b680e7fb2c..0000000000 --- a/exp/lighthorizon/build/k8s/lighthorizon_web.yml +++ /dev/null @@ -1,133 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - annotations: - fluxcd.io/ignore: "true" - labels: - app: lighthorizon-pubnet-web - name: lighthorizon-pubnet-web-env -data: - TXMETA_SOURCE: "s3://horizon-ledgermeta-pubnet" - INDEXES_SOURCE: "s3://horizon-indices-pubnet" - NETWORK_PASSPHRASE: "Public Global Stellar Network ; September 2015" - MAX_PARALLEL_DOWNLOADS: "16" - CACHE_PATH: "/ledgercache" - CACHE_PRELOAD_START_LEDGER: "0" - CACHE_PRELOAD_COUNT: "14400" ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - app: lighthorizon-pubnet-web - name: lighthorizon-pubnet-web-secret -type: Opaque -data: - AWS_REGION: - AWS_ACCESS_KEY_ID: - AWS_SECRET_ACCESS_KEY: ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - fluxcd.io/ignore: "true" - labels: - app: lighthorizon-pubnet-web - name: lighthorizon-pubnet-web -spec: - replicas: 1 - selector: - matchLabels: - app: lighthorizon-pubnet-web - template: - metadata: - annotations: - fluxcd.io/ignore: "true" - prometheus.io/port: "6060" - prometheus.io/scrape: "false" - creationTimestamp: null - labels: - app: lighthorizon-pubnet-web - spec: - containers: - - envFrom: - - secretRef: - name: lighthorizon-pubnet-web-secret - - configMapRef: - name: lighthorizon-pubnet-web-env - image: stellar/lighthorizon-web:latest - imagePullPolicy: Always - name: web - ports: - - containerPort: 8080 - name: web - protocol: TCP - - containerPort: 6060 - name: metrics - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: / - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - resources: - limits: - cpu: 2 - memory: 4Gi - requests: - cpu: 500m - 
memory: 1Gi - volumeMounts: - - mountPath: /ledgercache - name: cache-storage - volumes: - - name: cache-storage - emptyDir: {} ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: lighthorizon-pubnet-web - name: lighthorizon-pubnet-web -spec: - ports: - - name: http - port: 8000 - protocol: TCP - targetPort: 8080 - selector: - app: lighthorizon-pubnet-web - sessionAffinity: None - type: ClusterIP ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - cert-manager.io/cluster-issuer: default - ingress.kubernetes.io/ssl-redirect: "true" - kubernetes.io/ingress.class: public - name: lighthorizon-pubnet-web -spec: - rules: - - host: lighthorizon-pubnet.prototypes.kube001.services.stellar-ops.com - http: - paths: - - backend: - service: - name: lighthorizon-pubnet-web - port: - number: 8000 - path: / - pathType: ImplementationSpecific - tls: - - hosts: - - lighthorizon-pubnet.prototypes.kube001.services.stellar-ops.com - secretName: lighthorizon-pubnet-web-cert diff --git a/exp/lighthorizon/build/ledgerexporter/Dockerfile b/exp/lighthorizon/build/ledgerexporter/Dockerfile deleted file mode 100644 index f7129d7be2..0000000000 --- a/exp/lighthorizon/build/ledgerexporter/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -FROM golang:1.20 AS builder - -WORKDIR /go/src/github.com/stellar/go -COPY . ./ -RUN go mod download -RUN go install github.com/stellar/go/exp/services/ledgerexporter - -FROM ubuntu:22.04 -ARG STELLAR_CORE_VERSION -ENV STELLAR_CORE_VERSION=${STELLAR_CORE_VERSION:-*} -ENV STELLAR_CORE_BINARY_PATH /usr/bin/stellar-core - -ENV DEBIAN_FRONTEND=noninteractive -# ca-certificates are required to make tls connections -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils -RUN wget -qO - https://apt.stellar.org/SDF.asc | APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=true apt-key add - -RUN echo "deb https://apt.stellar.org jammy stable" >/etc/apt/sources.list.d/SDF.list -RUN echo "deb https://apt.stellar.org jammy unstable" >/etc/apt/sources.list.d/SDF-unstable.list -RUN apt-get update && apt-get install -y stellar-core=${STELLAR_CORE_VERSION} -RUN apt-get clean - -COPY --from=builder /go/src/github.com/stellar/go/exp/lighthorizon/build/ledgerexporter/captive-core-pubnet.cfg / -COPY --from=builder /go/src/github.com/stellar/go/exp/lighthorizon/build/ledgerexporter/captive-core-testnet.cfg / -COPY --from=builder /go/src/github.com/stellar/go/exp/lighthorizon/build/ledgerexporter/start / - -RUN ["chmod", "+x", "/start"] - -# for the captive core sqlite database -RUN mkdir -p /cc - -COPY --from=builder /go/bin/ledgerexporter ./ - -ENTRYPOINT ["/start"] diff --git a/exp/lighthorizon/build/ledgerexporter/README.md b/exp/lighthorizon/build/ledgerexporter/README.md deleted file mode 100644 index 5534b2809a..0000000000 --- a/exp/lighthorizon/build/ledgerexporter/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# `stellar/horizon-ledgerexporter` - -This docker image allows running multiple instances of `ledgerexporter` on a single machine or running it in [AWS Batch](https://aws.amazon.com/batch/). - -## Env variables - -### Running locally - -| Name | Description | -|---------|------------------------| -| `START` | First ledger to export | -| `END` | Last ledger to export | - -### Running in AWS Batch - -| Name | Description | -|----------------------|----------------------------------------------------------------------| -| `BATCH_START_LEDGER` | First ledger of the AWS Batch Job, must be a checkpoint ledger or 1. 
| -| `BATCH_SIZE` | Size of the batch, must be multiple of 64. | - -#### Example - -When you start 10 jobs with `BATCH_START_LEDGER=63` and `BATCH_SIZE=64` -it will run the following ranges: - -| `AWS_BATCH_JOB_ARRAY_INDEX` | `FROM` | `TO` | -|-----------------------------|--------|------| -| 0 | 63 | 127 | -| 1 | 127 | 191 | -| 2 | 191 | 255 | -| 3 | 255 | 319 | - -## Tips when using AWS Batch - -* In "Job definition" set vCPUs to 2 and Memory to 4096. This represents the `c5.large` instances Horizon should be using. -* In "Compute environments": - * Set instance type to "c5.large". - * Set "Maximum vCPUs" to 2x the number of instances you want to start (because "c5.large" has 2 vCPUs). Ex. 10 vCPUs = 5 x "c5.large" instances. -* Use spot instances! It's much cheaper and speed of testing will be the same in 99% of cases. -* You need to publish the image if there are any changes in `Dockerfile` or one of the scripts. -* When batch processing is over check if instances have been terminated. Sometimes AWS doesn't terminate them. -* Make sure the job timeout is set to a larger value if you export larger ranges. Default is just 100 seconds. diff --git a/exp/lighthorizon/build/ledgerexporter/captive-core-pubnet.cfg b/exp/lighthorizon/build/ledgerexporter/captive-core-pubnet.cfg deleted file mode 100644 index 6379725b8d..0000000000 --- a/exp/lighthorizon/build/ledgerexporter/captive-core-pubnet.cfg +++ /dev/null @@ -1,200 +0,0 @@ -PEER_PORT=11725 -DATABASE = "sqlite3:///cc/stellar.db" - -FAILURE_SAFETY=1 - -EXPERIMENTAL_BUCKETLIST_DB=true - -# WARNING! Do not use this config in production. Quorum sets should -# be carefully selected manually. -NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" -HTTP_PORT=11626 - -[[HOME_DOMAINS]] -HOME_DOMAIN="publicnode.org" -QUALITY="HIGH" - -[[HOME_DOMAINS]] -HOME_DOMAIN="lobstr.co" -QUALITY="HIGH" - -[[HOME_DOMAINS]] -HOME_DOMAIN="www.franklintempleton.com" -QUALITY="HIGH" - -[[HOME_DOMAINS]] -HOME_DOMAIN="satoshipay.io" -QUALITY="HIGH" - -[[HOME_DOMAINS]] -HOME_DOMAIN="whalestack.com" -QUALITY="HIGH" - -[[HOME_DOMAINS]] -HOME_DOMAIN="www.stellar.org" -QUALITY="HIGH" - -[[HOME_DOMAINS]] -HOME_DOMAIN="stellar.blockdaemon.com" -QUALITY="HIGH" - -[[VALIDATORS]] -NAME="Boötes" -PUBLIC_KEY="GCVJ4Z6TI6Z2SOGENSPXDQ2U4RKH3CNQKYUHNSSPYFPNWTLGS6EBH7I2" -ADDRESS="bootes.publicnode.org:11625" -HISTORY="curl -sf https://bootes-history.publicnode.org/{0} -o {1}" -HOME_DOMAIN="publicnode.org" - -[[VALIDATORS]] -NAME="Lyra by BP Ventures" -PUBLIC_KEY="GCIXVKNFPKWVMKJKVK2V4NK7D4TC6W3BUMXSIJ365QUAXWBRPPJXIR2Z" -ADDRESS="lyra.publicnode.org:11625" -HISTORY="curl -sf https://lyra-history.publicnode.org/{0} -o {1}" -HOME_DOMAIN="publicnode.org" - -[[VALIDATORS]] -NAME="Hercules by OG Technologies" -PUBLIC_KEY="GBLJNN3AVZZPG2FYAYTYQKECNWTQYYUUY2KVFN2OUKZKBULXIXBZ4FCT" -ADDRESS="hercules.publicnode.org:11625" -HISTORY="curl -sf https://hercules-history.publicnode.org/{0} -o {1}" -HOME_DOMAIN="publicnode.org" - -[[VALIDATORS]] -NAME="LOBSTR 3 (North America)" -PUBLIC_KEY="GD5QWEVV4GZZTQP46BRXV5CUMMMLP4JTGFD7FWYJJWRL54CELY6JGQ63" -ADDRESS="v3.stellar.lobstr.co:11625" -HISTORY="curl -sf https://archive.v3.stellar.lobstr.co/{0} -o {1}" -HOME_DOMAIN="lobstr.co" - -[[VALIDATORS]] -NAME="LOBSTR 1 (Europe)" -PUBLIC_KEY="GCFONE23AB7Y6C5YZOMKUKGETPIAJA4QOYLS5VNS4JHBGKRZCPYHDLW7" -ADDRESS="v1.stellar.lobstr.co:11625" -HISTORY="curl -sf https://archive.v1.stellar.lobstr.co/{0} -o {1}" -HOME_DOMAIN="lobstr.co" - -[[VALIDATORS]] -NAME="LOBSTR 2 (Europe)" 
-PUBLIC_KEY="GCB2VSADESRV2DDTIVTFLBDI562K6KE3KMKILBHUHUWFXCUBHGQDI7VL" -ADDRESS="v2.stellar.lobstr.co:11625" -HISTORY="curl -sf https://archive.v2.stellar.lobstr.co/{0} -o {1}" -HOME_DOMAIN="lobstr.co" - -[[VALIDATORS]] -NAME="LOBSTR 4 (Asia)" -PUBLIC_KEY="GA7TEPCBDQKI7JQLQ34ZURRMK44DVYCIGVXQQWNSWAEQR6KB4FMCBT7J" -ADDRESS="v4.stellar.lobstr.co:11625" -HISTORY="curl -sf https://archive.v4.stellar.lobstr.co/{0} -o {1}" -HOME_DOMAIN="lobstr.co" - -[[VALIDATORS]] -NAME="LOBSTR 5 (India)" -PUBLIC_KEY="GA5STBMV6QDXFDGD62MEHLLHZTPDI77U3PFOD2SELU5RJDHQWBR5NNK7" -ADDRESS="v5.stellar.lobstr.co:11625" -HISTORY="curl -sf https://archive.v5.stellar.lobstr.co/{0} -o {1}" -HOME_DOMAIN="lobstr.co" - -[[VALIDATORS]] -NAME="FT SCV 2" -PUBLIC_KEY="GCMSM2VFZGRPTZKPH5OABHGH4F3AVS6XTNJXDGCZ3MKCOSUBH3FL6DOB" -ADDRESS="stellar2.franklintempleton.com:11625" -HISTORY="curl -sf https://stellar-history-usc.franklintempleton.com/azuscshf401/{0} -o {1}" -HOME_DOMAIN="www.franklintempleton.com" - -[[VALIDATORS]] -NAME="FT SCV 3" -PUBLIC_KEY="GA7DV63PBUUWNUFAF4GAZVXU2OZMYRATDLKTC7VTCG7AU4XUPN5VRX4A" -ADDRESS="stellar3.franklintempleton.com:11625" -HISTORY="curl -sf https://stellar-history-ins.franklintempleton.com/azinsshf401/{0} -o {1}" -HOME_DOMAIN="www.franklintempleton.com" - -[[VALIDATORS]] -NAME="FT SCV 1" -PUBLIC_KEY="GARYGQ5F2IJEBCZJCBNPWNWVDOFK7IBOHLJKKSG2TMHDQKEEC6P4PE4V" -ADDRESS="stellar1.franklintempleton.com:11625" -HISTORY="curl -sf https://stellar-history-usw.franklintempleton.com/azuswshf401/{0} -o {1}" -HOME_DOMAIN="www.franklintempleton.com" - -[[VALIDATORS]] -NAME="SatoshiPay Frankfurt" -PUBLIC_KEY="GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE" -ADDRESS="stellar-de-fra.satoshipay.io:11625" -HISTORY="curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}" -HOME_DOMAIN="satoshipay.io" - -[[VALIDATORS]] -NAME="SatoshiPay Singapore" -PUBLIC_KEY="GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT" -ADDRESS="stellar-sg-sin.satoshipay.io:11625" -HISTORY="curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}" -HOME_DOMAIN="satoshipay.io" - -[[VALIDATORS]] -NAME="SatoshiPay Iowa" -PUBLIC_KEY="GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY" -ADDRESS="stellar-us-iowa.satoshipay.io:11625" -HISTORY="curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}" -HOME_DOMAIN="satoshipay.io" - -[[VALIDATORS]] -NAME="Whalestack (Germany)" -PUBLIC_KEY="GD6SZQV3WEJUH352NTVLKEV2JM2RH266VPEM7EH5QLLI7ZZAALMLNUVN" -ADDRESS="germany.stellar.whalestack.com:11625" -HISTORY="curl -sf https://germany.stellar.whalestack.com/history/{0} -o {1}" -HOME_DOMAIN="whalestack.com" - -[[VALIDATORS]] -NAME="Whalestack (Hong Kong)" -PUBLIC_KEY="GAZ437J46SCFPZEDLVGDMKZPLFO77XJ4QVAURSJVRZK2T5S7XUFHXI2Z" -ADDRESS="hongkong.stellar.whalestack.com:11625" -HISTORY="curl -sf https://hongkong.stellar.whalestack.com/history/{0} -o {1}" -HOME_DOMAIN="whalestack.com" - -[[VALIDATORS]] -NAME="Whalestack (Finland)" -PUBLIC_KEY="GADLA6BJK6VK33EM2IDQM37L5KGVCY5MSHSHVJA4SCNGNUIEOTCR6J5T" -ADDRESS="finland.stellar.whalestack.com:11625" -HISTORY="curl -sf https://finland.stellar.whalestack.com/history/{0} -o {1}" -HOME_DOMAIN="whalestack.com" - -[[VALIDATORS]] -NAME="SDF 2" -PUBLIC_KEY="GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK" -ADDRESS="core-live-b.stellar.org:11625" -HISTORY="curl -sf http://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" -HOME_DOMAIN="www.stellar.org" - -[[VALIDATORS]] -NAME="SDF 1" -PUBLIC_KEY="GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH" 
-ADDRESS="core-live-a.stellar.org:11625" -HISTORY="curl -sf http://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" -HOME_DOMAIN="www.stellar.org" - -[[VALIDATORS]] -NAME="SDF 3" -PUBLIC_KEY="GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ" -ADDRESS="core-live-c.stellar.org:11625" -HISTORY="curl -sf http://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" -HOME_DOMAIN="www.stellar.org" - -[[VALIDATORS]] -NAME="Blockdaemon Validator 3" -PUBLIC_KEY="GAYXZ4PZ7P6QOX7EBHPIZXNWY4KCOBYWJCA4WKWRKC7XIUS3UJPT6EZ4" -ADDRESS="stellar-full-validator3.bdnodes.net:11625" -HISTORY="curl -sf https://stellar-full-history3.bdnodes.net/{0} -o {1}" -HOME_DOMAIN="stellar.blockdaemon.com" - -[[VALIDATORS]] -NAME="Blockdaemon Validator 2" -PUBLIC_KEY="GAVXB7SBJRYHSG6KSQHY74N7JAFRL4PFVZCNWW2ARI6ZEKNBJSMSKW7C" -ADDRESS="stellar-full-validator2.bdnodes.net:11625" -HISTORY="curl -sf https://stellar-full-history2.bdnodes.net/{0} -o {1}" -HOME_DOMAIN="stellar.blockdaemon.com" - -[[VALIDATORS]] -NAME="Blockdaemon Validator 1" -PUBLIC_KEY="GAAV2GCVFLNN522ORUYFV33E76VPC22E72S75AQ6MBR5V45Z5DWVPWEU" -ADDRESS="stellar-full-validator1.bdnodes.net:11625" -HISTORY="curl -sf https://stellar-full-history1.bdnodes.net/{0} -o {1}" -HOME_DOMAIN="stellar.blockdaemon.com" \ No newline at end of file diff --git a/exp/lighthorizon/build/ledgerexporter/captive-core-testnet.cfg b/exp/lighthorizon/build/ledgerexporter/captive-core-testnet.cfg deleted file mode 100644 index 9c7dadc527..0000000000 --- a/exp/lighthorizon/build/ledgerexporter/captive-core-testnet.cfg +++ /dev/null @@ -1,32 +0,0 @@ -PEER_PORT=11725 -DATABASE = "sqlite3:///cc/stellar.db" - -UNSAFE_QUORUM=true -FAILURE_SAFETY=1 - -EXPERIMENTAL_BUCKETLIST_DB=true - -[[HOME_DOMAINS]] -HOME_DOMAIN="testnet.stellar.org" -QUALITY="HIGH" - -[[VALIDATORS]] -NAME="sdf_testnet_1" -HOME_DOMAIN="testnet.stellar.org" -PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" -ADDRESS="core-testnet1.stellar.org" -HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" - -[[VALIDATORS]] -NAME="sdf_testnet_2" -HOME_DOMAIN="testnet.stellar.org" -PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" -ADDRESS="core-testnet2.stellar.org" -HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" - -[[VALIDATORS]] -NAME="sdf_testnet_3" -HOME_DOMAIN="testnet.stellar.org" -PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" -ADDRESS="core-testnet3.stellar.org" -HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" \ No newline at end of file diff --git a/exp/lighthorizon/build/ledgerexporter/start b/exp/lighthorizon/build/ledgerexporter/start deleted file mode 100644 index 11d863effa..0000000000 --- a/exp/lighthorizon/build/ledgerexporter/start +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env bash -set -e - -START="${START:=2}" -END="${END:=0}" -CONTINUE="${CONTINUE:=false}" -# Writing to /latest is disabled by default to avoid race conditions between parallel container runs -WRITE_LATEST_PATH="${WRITE_LATEST_PATH:=false}" - -# config defaults to pubnet core; any other network requires setting all 3 of these in the container env -NETWORK_PASSPHRASE="${NETWORK_PASSPHRASE:=Public Global Stellar Network ; September 2015}" -HISTORY_ARCHIVE_URLS="${HISTORY_ARCHIVE_URLS:=https://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-live/core_live_001}" -CAPTIVE_CORE_CONFIG="${CAPTIVE_CORE_CONFIG:=/captive-core-pubnet.cfg}" - -CAPTIVE_CORE_USE_DB="${CAPTIVE_CORE_USE_DB:=true}" - -if [ -z "$ARCHIVE_TARGET" ]; then - echo "error: undefined ARCHIVE_TARGET env variable" - exit 1 -fi - -# Calculate params for AWS Batch -if [ ! -z "$AWS_BATCH_JOB_ARRAY_INDEX" ]; then - # The batch should have three env variables: - # * BATCH_START_LEDGER - start ledger of the job, must equal 1 or a - # checkpoint ledger ((i + 1) % 64 == 0). - # * BATCH_SIZE - size of the batch in ledgers, must be a multiple of 64! - # * BRANCH - git branch to build - # - # Ex: BATCH_START_LEDGER=63, BATCH_SIZE=64 will create the following ranges: - # AWS_BATCH_JOB_ARRAY_INDEX=0: [63, 127] - # AWS_BATCH_JOB_ARRAY_INDEX=1: [127, 191] - # AWS_BATCH_JOB_ARRAY_INDEX=2: [191, 255] - # AWS_BATCH_JOB_ARRAY_INDEX=3: [255, 319] - # ... - START=`expr "$BATCH_SIZE" \* "$AWS_BATCH_JOB_ARRAY_INDEX" + "$BATCH_START_LEDGER"` - END=`expr "$BATCH_SIZE" \* "$AWS_BATCH_JOB_ARRAY_INDEX" + "$BATCH_START_LEDGER" + "$BATCH_SIZE"` - - if [ "$START" -lt 2 ]; then - # The minimum ledger expected by the ledger exporter is 2 - START=2 - fi - -fi - -echo "START: $START END: $END" - -export TRACY_NO_INVARIANT_CHECK=1 -/ledgerexporter --target "$ARCHIVE_TARGET" \ - --captive-core-toml-path "$CAPTIVE_CORE_CONFIG" \ - --history-archive-urls "$HISTORY_ARCHIVE_URLS" --network-passphrase "$NETWORK_PASSPHRASE" \ - --continue="$CONTINUE" --write-latest-path="$WRITE_LATEST_PATH" \ - --start-ledger "$START" --end-ledger "$END" --captive-core-use-db="$CAPTIVE_CORE_USE_DB" - -echo "OK" diff --git a/exp/lighthorizon/build/web/Dockerfile b/exp/lighthorizon/build/web/Dockerfile deleted file mode 100644 index 83d0002ebc..0000000000 --- a/exp/lighthorizon/build/web/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM golang:1.20 AS builder - -WORKDIR /go/src/github.com/stellar/go -COPY . 
./ -RUN go mod download -RUN go install github.com/stellar/go/exp/lighthorizon - -FROM ubuntu:22.04 - -ENV DEBIAN_FRONTEND=noninteractive -# ca-certificates are required to make tls connections -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils -RUN apt-get clean - -COPY --from=builder /go/bin/lighthorizon ./ - -ENTRYPOINT ./lighthorizon serve \ - --network-passphrase "$NETWORK_PASSPHRASE" \ - --parallel-downloads "$MAX_PARALLEL_DOWNLOADS" \ - --ledger-cache "$CACHE_PATH" \ - --ledger-cache-preload "$CACHE_PRELOAD_COUNT" \ - --ledger-cache-preload-start "$CACHE_PRELOAD_START_LEDGER" \ - --log-level debug \ - "$TXMETA_SOURCE" "$INDEXES_SOURCE" diff --git a/exp/lighthorizon/common/operation.go b/exp/lighthorizon/common/operation.go deleted file mode 100644 index ca5f7bfe61..0000000000 --- a/exp/lighthorizon/common/operation.go +++ /dev/null @@ -1,52 +0,0 @@ -package common - -import ( - "encoding/hex" - - "github.com/stellar/go/network" - "github.com/stellar/go/toid" - "github.com/stellar/go/xdr" -) - -type Operation struct { - TransactionEnvelope *xdr.TransactionEnvelope - TransactionResult *xdr.TransactionResult - LedgerHeader *xdr.LedgerHeader - OpIndex int32 - TxIndex int32 -} - -func (o *Operation) Get() *xdr.Operation { - return &o.TransactionEnvelope.Operations()[o.OpIndex] -} - -func (o *Operation) OperationResult() *xdr.OperationResultTr { - results, _ := o.TransactionResult.OperationResults() - tr := results[o.OpIndex].MustTr() - return &tr -} - -func (o *Operation) TransactionHash() (string, error) { - hash, err := network.HashTransactionInEnvelope(*o.TransactionEnvelope, network.PublicNetworkPassphrase) - if err != nil { - return "", err - } - - return hex.EncodeToString(hash[:]), nil -} - -func (o *Operation) SourceAccount() xdr.AccountId { - sourceAccount := o.TransactionEnvelope.SourceAccount().ToAccountId() - if o.Get().SourceAccount != nil { - sourceAccount = o.Get().SourceAccount.ToAccountId() - } - return sourceAccount -} - -func (o *Operation) TOID() int64 { - return toid.New( - int32(o.LedgerHeader.LedgerSeq), - o.TxIndex+1, - o.OpIndex+1, - ).ToInt64() -} diff --git a/exp/lighthorizon/common/transaction.go b/exp/lighthorizon/common/transaction.go deleted file mode 100644 index 104fd3bc6b..0000000000 --- a/exp/lighthorizon/common/transaction.go +++ /dev/null @@ -1,70 +0,0 @@ -package common - -import ( - "encoding/hex" - "errors" - - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/network" - "github.com/stellar/go/toid" - "github.com/stellar/go/xdr" -) - -type Transaction struct { - *ingester.LedgerTransaction - LedgerHeader *xdr.LedgerHeader - TxIndex int32 - - NetworkPassphrase string -} - -// type Transaction struct { -// TransactionEnvelope *xdr.TransactionEnvelope -// TransactionResult *xdr.TransactionResult -// } - -func (tx *Transaction) TransactionHash() (string, error) { - if tx.NetworkPassphrase == "" { - return "", errors.New("network passphrase unspecified") - } - - hash, err := network.HashTransactionInEnvelope(tx.Envelope, tx.NetworkPassphrase) - if err != nil { - return "", err - } - - return hex.EncodeToString(hash[:]), nil -} - -func (o *Transaction) SourceAccount() xdr.MuxedAccount { - return o.Envelope.SourceAccount() -} - -func (tx *Transaction) TOID() int64 { - return toid.New( - int32(tx.LedgerHeader.LedgerSeq), - // TOID indexing is 1-based, so the 1st tx comes at position 1, - tx.TxIndex+1, - // but the TOID of a transaction comes BEFORE any operation - 0, - 
).ToInt64() -} - -func (tx *Transaction) HasPreconditions() bool { - switch pc := tx.Envelope.Preconditions(); pc.Type { - case xdr.PreconditionTypePrecondNone: - return false - case xdr.PreconditionTypePrecondTime: - return pc.TimeBounds != nil - case xdr.PreconditionTypePrecondV2: - // TODO: 2x check these - return (pc.V2.TimeBounds != nil || - pc.V2.LedgerBounds != nil || - pc.V2.MinSeqNum != nil || - pc.V2.MinSeqAge > 0 || - pc.V2.MinSeqLedgerGap > 0 || - len(pc.V2.ExtraSigners) > 0) - } - - return false -} diff --git a/exp/lighthorizon/http.go b/exp/lighthorizon/http.go deleted file mode 100644 index e61ad4c716..0000000000 --- a/exp/lighthorizon/http.go +++ /dev/null @@ -1,78 +0,0 @@ -package main - -import ( - "net/http" - "strconv" - "time" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - - "github.com/stellar/go/exp/lighthorizon/actions" - "github.com/stellar/go/exp/lighthorizon/services" - supportHttp "github.com/stellar/go/support/http" - "github.com/stellar/go/support/render/problem" -) - -func newWrapResponseWriter(w http.ResponseWriter, r *http.Request) middleware.WrapResponseWriter { - mw, ok := w.(middleware.WrapResponseWriter) - if !ok { - mw = middleware.NewWrapResponseWriter(w, r.ProtoMajor) - } - - return mw -} - -func prometheusMiddleware(requestDurationMetric *prometheus.SummaryVec) func(next http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - route := supportHttp.GetChiRoutePattern(r) - mw := newWrapResponseWriter(w, r) - - then := time.Now() - next.ServeHTTP(mw, r) - duration := time.Since(then) - - requestDurationMetric.With(prometheus.Labels{ - "status": strconv.FormatInt(int64(mw.Status()), 10), - "method": r.Method, - "route": route, - }).Observe(float64(duration.Seconds())) - }) - } -} - -func lightHorizonHTTPHandler(registry *prometheus.Registry, lightHorizon services.LightHorizon) http.Handler { - requestDurationMetric := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Namespace: "horizon_lite", Subsystem: "http", Name: "requests_duration_seconds", - Help: "HTTP requests durations, sliding window = 10m", - }, - []string{"status", "method", "route"}, - ) - registry.MustRegister(requestDurationMetric) - - router := chi.NewMux() - router.Use(prometheusMiddleware(requestDurationMetric)) - - router.Route("/accounts/{account_id}", func(r chi.Router) { - r.MethodFunc(http.MethodGet, "/transactions", actions.NewTXByAccountHandler(lightHorizon)) - r.MethodFunc(http.MethodGet, "/operations", actions.NewOpsByAccountHandler(lightHorizon)) - }) - - router.MethodFunc(http.MethodGet, "/", actions.Root(actions.RootResponse{ - Version: HorizonLiteVersion, - // by default, no other fields are known yet - })) - router.MethodFunc(http.MethodGet, "/api", actions.ApiDocs()) - router.Method(http.MethodGet, "/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - - problem.RegisterHost("") - router.NotFound(func(w http.ResponseWriter, request *http.Request) { - problem.Render(request.Context(), w, problem.NotFound) - }) - - return router -} diff --git a/exp/lighthorizon/http_test.go b/exp/lighthorizon/http_test.go deleted file mode 100644 index f59e2719d5..0000000000 --- a/exp/lighthorizon/http_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package main - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "testing" - - 
"github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/stellar/go/exp/lighthorizon/actions" - "github.com/stellar/go/exp/lighthorizon/services" - "github.com/stellar/go/support/render/problem" -) - -func TestUnknownUrl(t *testing.T) { - recorder := httptest.NewRecorder() - request, err := http.NewRequest("GET", "/unknown", nil) - require.NoError(t, err) - - prepareTestHttpHandler().ServeHTTP(recorder, request) - - resp := recorder.Result() - assert.Equal(t, http.StatusNotFound, resp.StatusCode) - - raw, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - - var problem problem.P - err = json.Unmarshal(raw, &problem) - assert.NoError(t, err) - assert.Equal(t, "Resource Missing", problem.Title) - assert.Equal(t, "not_found", problem.Type) -} - -func TestRootResponse(t *testing.T) { - recorder := httptest.NewRecorder() - request, err := http.NewRequest("GET", "/", nil) - require.NoError(t, err) - - prepareTestHttpHandler().ServeHTTP(recorder, request) - - var root actions.RootResponse - raw, err := io.ReadAll(recorder.Result().Body) - require.NoError(t, err) - require.NoError(t, json.Unmarshal(raw, &root)) - require.Equal(t, HorizonLiteVersion, root.Version) -} - -func prepareTestHttpHandler() http.Handler { - mockOperationService := &services.MockOperationService{} - mockTransactionService := &services.MockTransactionService{} - registry := prometheus.NewRegistry() - - lh := services.LightHorizon{ - Operations: mockOperationService, - Transactions: mockTransactionService, - } - - return lightHorizonHTTPHandler(registry, lh) -} diff --git a/exp/lighthorizon/index/Makefile b/exp/lighthorizon/index/Makefile deleted file mode 100644 index 38361d7d37..0000000000 --- a/exp/lighthorizon/index/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -XDRS = xdr/LightHorizon-types.x - -XDRGEN_COMMIT=3f6808cd161d72474ffbe9eedbd7013de7f92748 - -.PHONY: xdr clean update - -xdr/xdr_generated.go: $(XDRS) - docker run -it --rm -v $$PWD:/wd -w /wd ruby /bin/bash -c '\ - gem install specific_install -v 0.3.7 && \ - gem specific_install https://github.com/stellar/xdrgen.git -b $(XDRGEN_COMMIT) && \ - xdrgen \ - --language go \ - --namespace xdr \ - --output xdr/ \ - $(XDRS)' - ls -lAh - go fmt $@ - -xdr: xdr/xdr_generated.go - -clean: - rm ./xdr/xdr_generated.go || true - -update: clean xdr diff --git a/exp/lighthorizon/index/backend/backend.go b/exp/lighthorizon/index/backend/backend.go deleted file mode 100644 index 580e5f4d6e..0000000000 --- a/exp/lighthorizon/index/backend/backend.go +++ /dev/null @@ -1,14 +0,0 @@ -package index - -import types "github.com/stellar/go/exp/lighthorizon/index/types" - -// TODO: Use a more standardized filesystem-style backend, so we can re-use -// code -type Backend interface { - Flush(map[string]types.NamedIndices) error - FlushAccounts([]string) error - Read(account string) (types.NamedIndices, error) - ReadAccounts() ([]string, error) - FlushTransactions(map[string]*types.TrieIndex) error - ReadTransactions(prefix string) (*types.TrieIndex, error) -} diff --git a/exp/lighthorizon/index/backend/file.go b/exp/lighthorizon/index/backend/file.go deleted file mode 100644 index 062b1efcdb..0000000000 --- a/exp/lighthorizon/index/backend/file.go +++ /dev/null @@ -1,214 +0,0 @@ -package index - -import ( - "bufio" - "compress/gzip" - "io" - "io/fs" - "os" - "path/filepath" - - types "github.com/stellar/go/exp/lighthorizon/index/types" - - "github.com/stellar/go/support/collections/set" - 
"github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" -) - -type FileBackend struct { - dir string - parallel uint32 -} - -// NewFileBackend connects to indices stored at `dir`, creating the directory if one doesn't -// exist, and uses `parallel` to control how many workers to use when flushing to disk. -func NewFileBackend(dir string, parallel uint32) (*FileBackend, error) { - if parallel <= 0 { - parallel = 1 - } - - err := os.MkdirAll(dir, fs.ModeDir|0755) - if err != nil { - log.Errorf("Unable to mkdir %s, %v", dir, err) - return nil, err - } - - return &FileBackend{ - dir: dir, - parallel: parallel, - }, nil -} - -func (s *FileBackend) Flush(indexes map[string]types.NamedIndices) error { - return parallelFlush(s.parallel, indexes, s.writeBatch) -} - -func (s *FileBackend) FlushAccounts(accounts []string) error { - path := filepath.Join(s.dir, "accounts") - - f, err := os.OpenFile(path, os.O_CREATE| - os.O_APPEND| // crucial! since we might flush from various sources - os.O_WRONLY, - 0664) // rw-rw-r-- - - if err != nil { - return errors.Wrapf(err, "failed to open account file at %s", path) - } - - defer f.Close() - - // We write one account at a time because writes that occur within a single - // `write()` syscall are thread-safe. A larger write might be split into - // many calls and thus get interleaved, so we play it safe. - for _, account := range accounts { - f.Write([]byte(account + "\n")) - } - - return nil -} - -func (s *FileBackend) writeBatch(b *batch) error { - if len(b.indexes) == 0 { - return nil - } - - path := filepath.Join(s.dir, b.account[:3], b.account) - - err := os.MkdirAll(filepath.Dir(path), fs.ModeDir|0755) - if err != nil { - log.Errorf("Unable to mkdir %s, %v", filepath.Dir(path), err) - return nil - } - - f, err := os.Create(path) - if err != nil { - log.Errorf("Unable to create %s: %v", path, err) - return nil - } - defer f.Close() - - if _, err := writeGzippedTo(f, b.indexes); err != nil { - log.Errorf("Unable to serialize %s: %v", b.account, err) - return nil - } - - return nil -} - -func (s *FileBackend) FlushTransactions(indexes map[string]*types.TrieIndex) error { - // TODO: Parallelize this - for key, index := range indexes { - path := filepath.Join(s.dir, "tx", key) - - err := os.MkdirAll(filepath.Dir(path), fs.ModeDir|0755) - if err != nil { - log.Errorf("Unable to mkdir %s, %v", filepath.Dir(path), err) - continue - } - - f, err := os.Create(path) - if err != nil { - log.Errorf("Unable to create %s: %v", path, err) - continue - } - - zw := gzip.NewWriter(f) - if _, err := index.WriteTo(zw); err != nil { - log.Errorf("Unable to serialize %s: %v", path, err) - f.Close() - continue - } - - if err := zw.Close(); err != nil { - log.Errorf("Unable to serialize %s: %v", path, err) - f.Close() - continue - } - - if err := f.Close(); err != nil { - log.Errorf("Unable to save %s: %v", path, err) - } - } - return nil -} - -func (s *FileBackend) Read(account string) (types.NamedIndices, error) { - log.Debugf("Opening index: %s", account) - b, err := os.Open(filepath.Join(s.dir, account[:3], account)) - if err != nil { - return nil, err - } - defer b.Close() - - indexes, _, err := readGzippedFrom(bufio.NewReader(b)) - if err != nil { - log.Errorf("Unable to parse %s: %v", account, err) - return nil, os.ErrNotExist - } - return indexes, nil -} - -func (s *FileBackend) ReadAccounts() ([]string, error) { - path := filepath.Join(s.dir, "accounts") - log.Debugf("Opening accounts list at %s", path) - - f, err := os.Open(path) - if err != nil { - 
return nil, errors.Wrapf(err, "failed to open %s", path) - } - - const gAddressSize = 56 - - // We ballpark the capacity assuming all of the values being G-addresses. - preallocationSize := 100 * gAddressSize // default to 100 lines - info, err := os.Stat(path) - if err == nil { // we can still safely continue w/ errors - // Note that this will never be too large, but may be too small. - preallocationSize = int(info.Size()) / (gAddressSize + 1) // +1 for \n - } - accountMap := set.NewSet[string](preallocationSize) - accounts := make([]string, 0, preallocationSize) - - reader := bufio.NewReaderSize(f, 100*gAddressSize) // reasonable buffer size - for { - line, err := reader.ReadString(byte('\n')) - if err == io.EOF { - break - } else if err != nil { - return accounts, errors.Wrapf(err, "failed to read %s", path) - } - - account := line[:len(line)-1] // trim newline - - // The account list is very unlikely to be unique (especially if it was made - // w/ parallel flushes), so let's ensure that that's the case. - if !accountMap.Contains(account) { - accountMap.Add(account) - accounts = append(accounts, account) - } - } - - return accounts, nil -} - -func (s *FileBackend) ReadTransactions(prefix string) (*types.TrieIndex, error) { - log.Debugf("Opening index: %s", prefix) - b, err := os.Open(filepath.Join(s.dir, "tx", prefix)) - if err != nil { - return nil, err - } - defer b.Close() - zr, err := gzip.NewReader(b) - if err != nil { - log.Errorf("Unable to parse %s: %v", prefix, err) - return nil, os.ErrNotExist - } - defer zr.Close() - var index types.TrieIndex - _, err = index.ReadFrom(zr) - if err != nil { - log.Errorf("Unable to parse %s: %v", prefix, err) - return nil, os.ErrNotExist - } - return &index, nil -} diff --git a/exp/lighthorizon/index/backend/file_test.go b/exp/lighthorizon/index/backend/file_test.go deleted file mode 100644 index 6197f7b5c3..0000000000 --- a/exp/lighthorizon/index/backend/file_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package index - -import ( - "math/rand" - "testing" - - "github.com/stellar/go/keypair" - "github.com/stellar/go/xdr" - "github.com/stretchr/testify/require" -) - -func TestSimpleFileStore(t *testing.T) { - tmpDir := t.TempDir() - - // Create a large (beyond a single chunk) list of arbitrary accounts, some - // regular and some muxed. 
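As an aside on the comment above (a hedged sketch using the same helpers the test relies on, not part of the original fixture): a muxed account wraps an ordinary G-address plus a 64-bit ID into a single M-address, which is why both shapes can land in the accounts file.

    package main

    import (
        "fmt"

        "github.com/stellar/go/keypair"
        "github.com/stellar/go/xdr"
    )

    func main() {
        g := keypair.MustRandom().Address() // ordinary "G..." ed25519 account ID

        plain := xdr.MustMuxedAddress(g)                   // wraps the G-address as-is
        muxed, err := xdr.MuxedAccountFromAccountId(g, 42) // attaches the 64-bit ID 42
        if err != nil {
            panic(err)
        }

        fmt.Println(plain.Address()) // prints the original "G..." address
        fmt.Println(muxed.Address()) // prints an "M..." address encoding (g, 42)
    }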
- accountList := make([]string, 123) - for i := range accountList { - var err error - var muxed xdr.MuxedAccount - address := keypair.MustRandom().Address() - - if rand.Intn(2) == 1 { - muxed, err = xdr.MuxedAccountFromAccountId(address, 12345678) - require.NoErrorf(t, err, "shouldn't happen") - } else { - muxed = xdr.MustMuxedAddress(address) - } - - accountList[i] = muxed.Address() - } - - require.Len(t, accountList, 123) - - file, err := NewFileBackend(tmpDir, 1) - require.NoError(t, err) - - require.NoError(t, file.FlushAccounts(accountList)) - - accounts, err := file.ReadAccounts() - require.NoError(t, err) - require.Equal(t, accountList, accounts) -} diff --git a/exp/lighthorizon/index/backend/gzip.go b/exp/lighthorizon/index/backend/gzip.go deleted file mode 100644 index 63c8e332c2..0000000000 --- a/exp/lighthorizon/index/backend/gzip.go +++ /dev/null @@ -1,74 +0,0 @@ -package index - -import ( - "bytes" - "compress/gzip" - "errors" - "io" - - types "github.com/stellar/go/exp/lighthorizon/index/types" -) - -func writeGzippedTo(w io.Writer, indexes types.NamedIndices) (int64, error) { - zw := gzip.NewWriter(w) - - var n int64 - for id, index := range indexes { - zw.Name = id - nWrote, err := io.Copy(zw, index.Buffer()) - n += nWrote - if err != nil { - return n, err - } - - if err := zw.Close(); err != nil { - return n, err - } - - zw.Reset(w) - } - - return n, nil -} - -func readGzippedFrom(r io.Reader) (types.NamedIndices, int64, error) { - if _, ok := r.(io.ByteReader); !ok { - return nil, 0, errors.New("reader *must* implement ByteReader") - } - - zr, err := gzip.NewReader(r) - if err != nil { - return nil, 0, err - } - - indexes := types.NamedIndices{} - var buf bytes.Buffer - var n int64 - for { - zr.Multistream(false) - - nRead, err := io.Copy(&buf, zr) - n += nRead - if err != nil { - return nil, n, err - } - - ind, err := types.NewBitmapIndex(buf.Bytes()) - if err != nil { - return nil, n, err - } - - indexes[zr.Name] = ind - - buf.Reset() - - err = zr.Reset(r) - if err == io.EOF { - break - } else if err != nil { - return nil, n, err - } - } - - return indexes, n, zr.Close() -} diff --git a/exp/lighthorizon/index/backend/gzip_test.go b/exp/lighthorizon/index/backend/gzip_test.go deleted file mode 100644 index 730e13185d..0000000000 --- a/exp/lighthorizon/index/backend/gzip_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package index - -import ( - "bufio" - "bytes" - "math/rand" - "os" - "path/filepath" - "testing" - - types "github.com/stellar/go/exp/lighthorizon/index/types" - "github.com/stretchr/testify/require" -) - -func TestGzipRoundtrip(t *testing.T) { - index := &types.BitmapIndex{} - anotherIndex := &types.BitmapIndex{} - for i := 0; i < 100+rand.Intn(1000); i++ { - index.SetActive(uint32(rand.Intn(10_000))) - anotherIndex.SetActive(uint32(rand.Intn(10_000))) - } - - indices := types.NamedIndices{ - "a": index, - "short/name": anotherIndex, - "slightlyLonger/name": index, - } - - var buf bytes.Buffer - wroteBytes, err := writeGzippedTo(&buf, indices) - require.NoError(t, err) - require.Greater(t, wroteBytes, int64(0)) - - gz := filepath.Join(t.TempDir(), "test.gzip") - require.NoError(t, os.WriteFile(gz, buf.Bytes(), 0644)) - f, err := os.Open(gz) - require.NoError(t, err) - defer f.Close() - - // Ensure that reading directly from a file errors out. 
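The reason for that expectation, briefly: compress/gzip documents that a source without a ReadByte method may be over-read, which here would swallow bytes belonging to the next concatenated gzip member and break the Reset-based multistream walk in readGzippedFrom. Hence the up-front io.ByteReader check. A sketch of the intended call pattern (hypothetical file name; fragment in this test's context):

    f, err := os.Open("indexes.gz") // hypothetical path
    require.NoError(t, err)
    defer f.Close()

    // *os.File has no ReadByte method, so hand readGzippedFrom a bufio.Reader;
    // bufio.Reader satisfies io.ByteReader and keeps member boundaries intact.
    indexes, _, err := readGzippedFrom(bufio.NewReader(f))
    require.NoError(t, err)
    _ = indexes // each gzip member's Name maps to its parsed BitmapIndex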
- _, _, err = readGzippedFrom(f) - require.Error(t, err) - - read, readBytes, err := readGzippedFrom(bufio.NewReader(f)) - require.NoError(t, err) - require.Greater(t, readBytes, int64(0)) - - require.Equal(t, indices, read) - require.Equal(t, wroteBytes, readBytes) - require.Len(t, read, len(indices)) - - for name, index := range indices { - raw1, err := index.ToXDR().MarshalBinary() - require.NoError(t, err) - - raw2, err := read[name].ToXDR().MarshalBinary() - require.NoError(t, err) - - require.Equal(t, raw1, raw2) - } -} diff --git a/exp/lighthorizon/index/backend/parallel_flush.go b/exp/lighthorizon/index/backend/parallel_flush.go deleted file mode 100644 index 6f65bedc42..0000000000 --- a/exp/lighthorizon/index/backend/parallel_flush.go +++ /dev/null @@ -1,73 +0,0 @@ -package index - -import ( - "sync" - "sync/atomic" - "time" - - types "github.com/stellar/go/exp/lighthorizon/index/types" - "github.com/stellar/go/support/log" -) - -type batch struct { - account string - indexes types.NamedIndices -} - -type flushBatch func(b *batch) error - -func parallelFlush(parallel uint32, allIndexes map[string]types.NamedIndices, f flushBatch) error { - var wg sync.WaitGroup - - batches := make(chan *batch, parallel) - - wg.Add(1) - go func() { - // forces this async func to be waited on also, otherwise the outer - // method returns before this finishes. - defer wg.Done() - - for account, indexes := range allIndexes { - batches <- &batch{ - account: account, - indexes: indexes, - } - } - - if len(allIndexes) == 0 { - close(batches) - } - }() - - written := uint64(0) - for i := uint32(0); i < parallel; i++ { - wg.Add(1) - go func(workerNum uint32) { - defer wg.Done() - for batch := range batches { - if err := f(batch); err != nil { - log.Errorf("Error occurred writing batch: %v, retrying...", err) - time.Sleep(50 * time.Millisecond) - batches <- batch - continue - } - - nwritten := atomic.AddUint64(&written, 1) - if nwritten%1234 == 0 { - log.WithField("worker", workerNum). - Infof("Writing indices... 
%d/%d (%.2f%%)", - nwritten, len(allIndexes), - (float64(nwritten)/float64(len(allIndexes)))*100) - } - - if nwritten == uint64(len(allIndexes)) { - close(batches) - } - } - }(i) - } - - wg.Wait() - - return nil -} diff --git a/exp/lighthorizon/index/backend/s3.go b/exp/lighthorizon/index/backend/s3.go deleted file mode 100644 index a4f5a7e751..0000000000 --- a/exp/lighthorizon/index/backend/s3.go +++ /dev/null @@ -1,220 +0,0 @@ -package index - -import ( - "bytes" - "compress/gzip" - "os" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" - - types "github.com/stellar/go/exp/lighthorizon/index/types" -) - -type S3Backend struct { - s3Session *session.Session - downloader *s3manager.Downloader - uploader *s3manager.Uploader - parallel uint32 - pathPrefix string - bucket string -} - -func NewS3Backend(awsConfig *aws.Config, bucket string, pathPrefix string, parallel uint32) (*S3Backend, error) { - s3Session, err := session.NewSession(awsConfig) - if err != nil { - return nil, err - } - - return &S3Backend{ - s3Session: s3Session, - downloader: s3manager.NewDownloader(s3Session), - uploader: s3manager.NewUploader(s3Session), - parallel: parallel, - pathPrefix: pathPrefix, - bucket: bucket, - }, nil -} - -func (s *S3Backend) FlushAccounts(accounts []string) error { - var buf bytes.Buffer - accountsString := strings.Join(accounts, "\n") - _, err := buf.WriteString(accountsString) - if err != nil { - return err - } - - path := filepath.Join(s.pathPrefix, "accounts") - - _, err = s.uploader.Upload(&s3manager.UploadInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(path), - Body: &buf, - }) - if err != nil { - return err - } - - return nil -} - -func (s *S3Backend) Flush(indexes map[string]types.NamedIndices) error { - return parallelFlush(s.parallel, indexes, s.writeBatch) -} - -func (s *S3Backend) writeBatch(b *batch) error { - // TODO: re-use buffers in a pool - var buf bytes.Buffer - if _, err := writeGzippedTo(&buf, b.indexes); err != nil { - // TODO: Should we retry or what here?? 
- return errors.Wrapf(err, "unable to serialize %s", b.account) - } - - path := s.path(b.account) - - _, err := s.uploader.Upload(&s3manager.UploadInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(path), - Body: &buf, - }) - if err != nil { - return errors.Wrapf(err, "unable to upload %s", b.account) - } - - return nil -} - -func (s *S3Backend) FlushTransactions(indexes map[string]*types.TrieIndex) error { - // TODO: Parallelize this - var buf bytes.Buffer - for key, index := range indexes { - buf.Reset() - path := filepath.Join(s.pathPrefix, "tx", key) - - zw := gzip.NewWriter(&buf) - if _, err := index.WriteTo(zw); err != nil { - log.Errorf("Unable to serialize %s: %v", path, err) - continue - } - - if err := zw.Close(); err != nil { - log.Errorf("Unable to serialize %s: %v", path, err) - continue - } - - _, err := s.uploader.Upload(&s3manager.UploadInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(path), - Body: &buf, - }) - if err != nil { - log.Errorf("Unable to upload %s: %v", path, err) - // TODO: retries - continue - } - } - return nil -} - -func (s *S3Backend) ReadAccounts() ([]string, error) { - log.Debugf("Downloading accounts list") - b := &aws.WriteAtBuffer{} - path := filepath.Join(s.pathPrefix, "accounts") - n, err := s.downloader.Download(b, &s3.GetObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(path), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey { - return nil, os.ErrNotExist - } - return nil, errors.Wrapf(err, "Unable to download accounts list") - } - if n == 0 { - return nil, os.ErrNotExist - } - body := b.Bytes() - accounts := strings.Split(string(body), "\n") - return accounts, nil -} - -func (s *S3Backend) path(account string) string { - return filepath.Join(s.pathPrefix, account[:10], account) -} - -func (s *S3Backend) Read(account string) (types.NamedIndices, error) { - // Check if index exists in S3 - log.Debugf("Downloading index: %s", account) - var err error - for i := 0; i < 10; i++ { - b := &aws.WriteAtBuffer{} - path := s.path(account) - var n int64 - n, err = s.downloader.Download(b, &s3.GetObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(path), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey { - return nil, os.ErrNotExist - } - err = errors.Wrapf(err, "Unable to download %s", account) - time.Sleep(100 * time.Millisecond) - continue - } - if n == 0 { - return nil, os.ErrNotExist - } - var indexes map[string]*types.BitmapIndex - indexes, _, err = readGzippedFrom(bytes.NewReader(b.Bytes())) - if err != nil { - log.Errorf("Unable to parse %s: %v", account, err) - return nil, os.ErrNotExist - } - return indexes, nil - } - - return nil, err -} - -func (s *S3Backend) ReadTransactions(prefix string) (*types.TrieIndex, error) { - // Check if index exists in S3 - log.Debugf("Downloading index: %s", prefix) - b := &aws.WriteAtBuffer{} - path := filepath.Join(s.pathPrefix, "tx", prefix) - n, err := s.downloader.Download(b, &s3.GetObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(path), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey { - return nil, os.ErrNotExist - } - return nil, errors.Wrapf(err, "Unable to download %s", prefix) - } - if n == 0 { - return nil, os.ErrNotExist - } - zr, err := gzip.NewReader(bytes.NewReader(b.Bytes())) - if err != nil { - log.Errorf("Unable to parse %s: %v", prefix, err) - return nil, os.ErrNotExist - } - defer zr.Close() 
- - var index types.TrieIndex - _, err = index.ReadFrom(zr) - if err != nil { - log.Errorf("Unable to parse %s: %v", prefix, err) - return nil, os.ErrNotExist - } - return &index, nil -} diff --git a/exp/lighthorizon/index/builder.go b/exp/lighthorizon/index/builder.go deleted file mode 100644 index 324783b4f0..0000000000 --- a/exp/lighthorizon/index/builder.go +++ /dev/null @@ -1,366 +0,0 @@ -package index - -import ( - "context" - "fmt" - "io" - "math" - "os" - "sync" - "sync/atomic" - "time" - - "golang.org/x/sync/errgroup" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/ingest" - "github.com/stellar/go/ingest/ledgerbackend" - "github.com/stellar/go/metaarchive" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" - "github.com/stellar/go/support/storage" - "github.com/stellar/go/xdr" -) - -func BuildIndices( - ctx context.Context, - sourceUrl string, // where is raw txmeta coming from? - targetUrl string, // where should the resulting indices go? - networkPassphrase string, - ledgerRange historyarchive.Range, // inclusive - modules []string, - workerCount int, -) (*IndexBuilder, error) { - L := log.Ctx(ctx).WithField("service", "builder") - - indexStore, err := ConnectWithConfig(StoreConfig{ - URL: targetUrl, - Workers: uint32(workerCount), - Log: L.WithField("subservice", "index"), - }) - if err != nil { - return nil, err - } - - // We use historyarchive as a backend here just to abstract away dealing - // with the filesystem directly. - source, err := historyarchive.ConnectBackend( - sourceUrl, - storage.ConnectOptions{ - Context: ctx, - S3Region: "us-east-1", - }, - ) - if err != nil { - return nil, err - } - - metaArchive := metaarchive.NewMetaArchive(source) - - ledgerBackend := ledgerbackend.NewHistoryArchiveBackend(metaArchive) - - if ledgerRange.High == 0 { - var backendErr error - ledgerRange.High, backendErr = ledgerBackend.GetLatestLedgerSequence(ctx) - if backendErr != nil { - return nil, backendErr - } - } - - if ledgerRange.High < ledgerRange.Low { - return nil, fmt.Errorf("invalid ledger range: %s", ledgerRange.String()) - } - - ledgerCount := 1 + (ledgerRange.High - ledgerRange.Low) // +1 bc inclusive - parallel := int(max(1, uint32(workerCount))) - - startTime := time.Now() - L.Infof("Creating indices for ledger range: [%d, %d] (%d ledgers)", - ledgerRange.Low, ledgerRange.High, ledgerCount) - L.Infof("Using %d workers", parallel) - - // Create a bunch of workers that process ledgers a checkpoint range at a - // time (better than a ledger at a time to minimize flushes). - wg, ctx := errgroup.WithContext(ctx) - ch := make(chan historyarchive.Range, parallel) - - indexBuilder := NewIndexBuilder(indexStore, metaArchive, networkPassphrase) - for _, part := range modules { - switch part { - case "transactions": - indexBuilder.RegisterModule(ProcessTransaction) - case "accounts": - indexBuilder.RegisterModule(ProcessAccountsByCheckpoint) - case "accounts_by_ledger": - indexBuilder.RegisterModule(ProcessAccountsByLedger) - case "accounts_unbacked": - indexBuilder.RegisterModule(ProcessAccountsByCheckpointWithoutBackend) - indexStore.ClearMemory(false) - case "accounts_by_ledger_unbacked": - indexBuilder.RegisterModule(ProcessAccountsByLedgerWithoutBackend) - indexStore.ClearMemory(false) - default: - return indexBuilder, fmt.Errorf("unknown module '%s'", part) - } - } - - // Submit the work to the channels, breaking up the range into individual - // checkpoint ranges. 
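To make the chunking arithmetic concrete, a sketch: NewCheckpointManager(0) selects the default 64-ledger checkpoint frequency, and min/max are the package-local uint32 helpers defined at the bottom of this file.

    mgr := historyarchive.NewCheckpointManager(0)

    // Checkpoints fall on ledgers 63, 127, 191, ..., so ledger 100 belongs
    // to the checkpoint range [64, 127]:
    chunk := mgr.GetCheckpointRange(100)
    fmt.Println(chunk.Low, chunk.High) // 64 127

    // Each chunk is then clamped to the requested build range, say [70, 120]:
    chunk.High = min(chunk.High, 120) // 120
    chunk.Low = max(chunk.Low, 70)    // 70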
- checkpoints := historyarchive.NewCheckpointManager(0) - go func() { - for ledger := range ledgerRange.GenerateCheckpoints(checkpoints) { - chunk := checkpoints.GetCheckpointRange(ledger) - chunk.High = min(chunk.High, ledgerRange.High) // don't exceed upper bound - chunk.Low = max(chunk.Low, ledgerRange.Low) // nor the lower bound - - ch <- chunk - } - - close(ch) - }() - - processed := uint64(0) - for i := 0; i < parallel; i++ { - wg.Go(func() error { - for ledgerRange := range ch { - count := (ledgerRange.High - ledgerRange.Low) + 1 - L.Debugf("Working on checkpoint range [%d, %d] (%d ledgers)", - ledgerRange.Low, ledgerRange.High, count) - - if err := indexBuilder.Build(ctx, ledgerRange); err != nil { - return errors.Wrapf(err, - "building indices for ledger range [%d, %d] failed", - ledgerRange.Low, ledgerRange.High) - } - - nprocessed := atomic.AddUint64(&processed, uint64(count)) - if nprocessed%1234 == 0 { - PrintProgress("Reading ledgers", nprocessed, uint64(ledgerCount), startTime) - } - - // Upload indices once every 10 checkpoints to save memory - if nprocessed%(10*uint64(checkpoints.GetCheckpointFrequency())) == 0 { - if err := indexStore.Flush(); err != nil { - return errors.Wrap(err, "flushing indices failed") - } - } - } - return nil - }) - } - - if err := wg.Wait(); err != nil { - return indexBuilder, errors.Wrap(err, "one or more workers failed") - } - - PrintProgress("Reading ledgers", processed, uint64(ledgerCount), startTime) - - L.Infof("Processed %d ledgers via %d workers", processed, parallel) - L.Infof("Uploading indices to %s", targetUrl) - if err := indexStore.Flush(); err != nil { - return indexBuilder, errors.Wrap(err, "flushing indices failed") - } - - // Assertion for testing - if processed != uint64(ledgerCount) { - L.Warnf("processed %d but expected %d", processed, ledgerCount) - } - - return indexBuilder, nil -} - -// Module is a way to process ingested data and shove it into an index store. -type Module func( - indexStore Store, - ledger xdr.LedgerCloseMeta, - transaction ingest.LedgerTransaction, -) error - -// IndexBuilder contains everything needed to build indices from ledger ranges. -type IndexBuilder struct { - store Store - metaArchive metaarchive.MetaArchive - networkPassphrase string - - lastBuiltLedgerWriteLock sync.Mutex - lastBuiltLedger uint32 - - modules []Module -} - -func NewIndexBuilder( - indexStore Store, - metaArchive metaarchive.MetaArchive, - networkPassphrase string, -) *IndexBuilder { - return &IndexBuilder{ - store: indexStore, - metaArchive: metaArchive, - networkPassphrase: networkPassphrase, - } -} - -// RegisterModule adds a module to process every given ledger. It is not -// threadsafe and all calls should be made *before* any calls to `Build`. -func (builder *IndexBuilder) RegisterModule(module Module) { - builder.modules = append(builder.modules, module) -} - -// RunModules executes all of the registered modules on the given ledger. -func (builder *IndexBuilder) RunModules( - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, -) error { - for _, module := range builder.modules { - if err := module(builder.store, ledger, tx); err != nil { - return err - } - } - - return nil -} - -// Build sequentially creates indices for each ledger in the given range based -// on the registered modules. -// -// TODO: We can probably optimize this by doing GetLedger in parallel with the -// ingestion & index building, since the network will be idle during the latter -// portion. 
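One hypothetical shape for that TODO (a sketch under the assumption that GetLedger behaves as Build uses it below, returning an xdr.SerializedLedgerCloseMeta; this is not the shipped design): prefetch ledgers on a separate goroutine so the fetch overlaps with ingestion.

    prefetched := make(chan xdr.SerializedLedgerCloseMeta, 8)
    go func() {
        defer close(prefetched)
        for seq := ledgerRange.Low; seq <= ledgerRange.High; seq++ {
            ledger, err := builder.metaArchive.GetLedger(ctx, seq)
            if err != nil {
                return // a real version would propagate the error
            }
            prefetched <- ledger
        }
    }()
    for ledger := range prefetched {
        _ = ledger // feed through the transaction reader and RunModules as below
    }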
-func (builder *IndexBuilder) Build(ctx context.Context, ledgerRange historyarchive.Range) error { - for ledgerSeq := ledgerRange.Low; ledgerSeq <= ledgerRange.High; ledgerSeq++ { - ledger, err := builder.metaArchive.GetLedger(ctx, ledgerSeq) - if err != nil { - if !os.IsNotExist(err) { - log.Errorf("error getting ledger %d: %v", ledgerSeq, err) - } - return err - } - - reader, err := ingest.NewLedgerTransactionReaderFromLedgerCloseMeta( - builder.networkPassphrase, *ledger.V0) - if err != nil { - return err - } - - for { - tx, err := reader.Read() - if err == io.EOF { - break - } else if err != nil { - return err - } - - if err := builder.RunModules(*ledger.V0, tx); err != nil { - return err - } - } - } - - builder.lastBuiltLedgerWriteLock.Lock() - defer builder.lastBuiltLedgerWriteLock.Unlock() - builder.lastBuiltLedger = max(builder.lastBuiltLedger, ledgerRange.High) - - return nil -} - -func (builder *IndexBuilder) Watch(ctx context.Context) error { - latestLedger, err := builder.metaArchive.GetLatestLedgerSequence(ctx) - if err != nil { - log.Errorf("Failed to retrieve latest ledger: %v", err) - return err - } - nextLedger := builder.lastBuiltLedger + 1 - - log.Infof("Catching up to latest ledger: (%d, %d]", nextLedger, latestLedger) - if err = builder.Build(ctx, historyarchive.Range{ - Low: nextLedger, - High: latestLedger, - }); err != nil { - log.Errorf("Initial catchup failed: %v", err) - } - - for { - nextLedger = builder.lastBuiltLedger + 1 - log.Infof("Awaiting next ledger (%d)", nextLedger) - - // To keep the MVP simple, let's just naively poll the backend until the - // ledger we want becomes available. - // - // Refer to this thread [1] for a deeper brain dump on why we're - // preferring this over doing proper filesystem monitoring (e.g. - // fsnotify for on-disk). Essentially, supporting this for every - // possible index backend is a non-trivial amount of work with an - // uncertain payoff. - // - // [1]: https://stellarfoundation.slack.com/archives/C02B04RMK/p1654903342555669 - - // We sleep with linear backoff starting with 6s. Ledgers get posted - // every 5-7s on average, but to be extra careful, let's give it a full - // minute before we give up entirely. - timedCtx, cancel := context.WithTimeout(ctx, 60*time.Second) - defer cancel() - - sleepTime := (6 * time.Second) - outer: - for { - time.Sleep(sleepTime) - select { - case <-timedCtx.Done(): - return errors.Wrap(timedCtx.Err(), "awaiting next ledger failed") - - default: - buildErr := builder.Build(timedCtx, historyarchive.Range{ - Low: nextLedger, - High: nextLedger, - }) - if buildErr == nil { - break outer - } - - if os.IsNotExist(buildErr) { - sleepTime += (time.Second * 2) - continue - } - - return errors.Wrap(buildErr, "awaiting next ledger failed") - } - } - } -} - -func PrintProgress(prefix string, done, total uint64, startTime time.Time) { - progress := float64(done) / float64(total) - elapsed := time.Since(startTime) - - // Approximate based on how much work is left to do and how long this much - // progress took, e.g. if 4/10 took 2s then 6/10 will "take" 3s (though this - // assumes consistent load).
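Worked through with assumed numbers, the comment's arithmetic looks like this:

    elapsed := 2 * time.Second           // 4 of 10 items took 2s...
    done, total := uint64(4), uint64(10)
    eta := time.Duration(float64(elapsed) / float64(done) * float64(total-done))
    fmt.Println(eta) // 3s: the 6 remaining items at 0.5s apiece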
- remaining := (float64(elapsed) / float64(done)) * float64(total-done) - - var remainingStr string - if math.IsInf(remaining, 0) || math.IsNaN(remaining) { - remainingStr = "unknown" - } else { - remainingStr = time.Duration(remaining).Round(time.Millisecond).String() - } - - log.Infof("%s - %.1f%% (%d/%d) - elapsed: %s, remaining: ~%s", prefix, - 100*progress, done, total, - elapsed.Round(time.Millisecond), - remainingStr, - ) -} - -func min(a, b uint32) uint32 { - if a < b { - return a - } - return b -} - -func max(a, b uint32) uint32 { - if a > b { - return a - } - return b -} diff --git a/exp/lighthorizon/index/cmd/batch/doc.go b/exp/lighthorizon/index/cmd/batch/doc.go deleted file mode 100644 index 70e55009d5..0000000000 --- a/exp/lighthorizon/index/cmd/batch/doc.go +++ /dev/null @@ -1,52 +0,0 @@ -// Package batch provides two commands, map and reduce, that can be run in AWS -// Batch to generate indexes for occurrences of accounts in each checkpoint. -// -// The map step uses the AWS_BATCH_JOB_ARRAY_INDEX env variable provided by AWS -// Batch to cut all checkpoint history into smaller chunks, each processed by a -// single map batch job (and by multiple parallel workers in a single job). A -// single job simply creates indexes for a given range of checkpoints and saves -// the indexes and all accounts seen in that range (FlushAccounts method) to a -// job folder (job_X, X = 0, 1, 2, 3, ...) in S3. -// -// network history split into chunks: -// [ | | | | | | | | | | | | | | | | | | | | | ] -// ---- -// / \ -// / \ -// / \ -// [..........] <- each chunk consists of checkpoints -// | -// . - each checkpoint is processed by a free -// worker (go routine) -// -// The reduce step is responsible for merging all indexes created in the map step -// into final indexes for each account and for the entire network history. Each -// reduce job goes through all map job results (0..MAP_JOBS) and reads all accounts -// processed in a given map job. Then for each account it merges indexes from -// all map jobs. Each reduce job maintains a `doneAccounts` map because if a given -// account index was processed earlier it should be skipped instead of being -// processed again. Each reduce job also runs multiple parallel workers. Finally, -// the method used to determine whether a given (job, worker) pair should -// process a given account takes a 64-bit hash of the account ID, truncated to -// 32 bits and split into two 16-bit parts: left and right. If the left part -// modulo REDUCE_JOBS equals the job index and the right part modulo the number -// of parallel workers equals the worker index, then the account is processed. -// Otherwise it's skipped (and will be picked up by another (job, worker) pair). -// -// map step results saved in S3: -// x x x x x x x x x x x x x x x x x x x x x x x x x x x x -// | -// ㄴ job0/accounts <- each job's results contain a list of accounts -// | processed by a given job... -// | -// ㄴ job0/... <- ...and partial indexes -// -// hash(account_id) => XXXX YYYY <- hash of account id, truncated to 32 bits -// (shown as two 16-bit halves) -// -// if XXXX % REDUCE_JOBS == JOB_ID and YYYY % WORKERS_COUNT == WORKER_ID -// then process a given account by merging all indexes of a given account -// in all map step results, then mark account as done so if the account -// is seen again it will be skipped, -// -// else: skip the account.
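A worked example of that routing rule, mirroring shouldProcessAccount further down in reduce/main.go (the job and worker counts are assumed values):

    h := fnv.New64a()
    h.Write([]byte("GABC")) // hypothetical account ID; Write never errors
    digest := uint32(h.Sum64()) // keep only the low 32 bits

    left, right := digest>>16, digest&0xFFFF
    jobID := left % 2     // with REDUCE_JOBS = 2
    workerID := right % 3 // with WORKERS_COUNT = 3

    // Exactly one (jobID, workerID) pair claims this account, so each account
    // is merged once even though every reduce job scans every accounts file.
    _, _ = jobID, workerID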
-package batch diff --git a/exp/lighthorizon/index/cmd/batch/map/main.go b/exp/lighthorizon/index/cmd/batch/map/main.go deleted file mode 100644 index 384e99ee80..0000000000 --- a/exp/lighthorizon/index/cmd/batch/map/main.go +++ /dev/null @@ -1,144 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "runtime" - "strconv" - "strings" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/network" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" -) - -type BatchConfig struct { - historyarchive.Range - TxMetaSourceUrl string - IndexTargetUrl string - NetworkPassphrase string -} - -const ( - batchSizeEnv = "BATCH_SIZE" - jobIndexEnvName = "JOB_INDEX_ENV" - firstCheckpointEnv = "FIRST_CHECKPOINT" - txmetaSourceUrlEnv = "TXMETA_SOURCE" - indexTargetUrlEnv = "INDEX_TARGET" - workerCountEnv = "WORKER_COUNT" - networkPassphraseEnv = "NETWORK_PASSPHRASE" - modulesEnv = "MODULES" -) - -func NewBatchConfig() (*BatchConfig, error) { - indexTargetRootUrl := os.Getenv(indexTargetUrlEnv) - if indexTargetRootUrl == "" { - return nil, errors.New("required parameter: " + indexTargetUrlEnv) - } - - jobIndexEnv := os.Getenv(jobIndexEnvName) - if jobIndexEnv == "" { - return nil, errors.New("env variable can't be empty " + jobIndexEnvName) - } - jobIndex, err := strconv.ParseUint(os.Getenv(jobIndexEnv), 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+jobIndexEnv) - } - - firstCheckpoint, err := strconv.ParseUint(os.Getenv(firstCheckpointEnv), 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+firstCheckpointEnv) - } - - checkpoints := historyarchive.NewCheckpointManager(0) - if !checkpoints.IsCheckpoint(uint32(firstCheckpoint - 1)) { - return nil, fmt.Errorf( - "%s (%d) must be the first ledger in a checkpoint range", - firstCheckpointEnv, firstCheckpoint) - } - - batchSize, err := strconv.ParseUint(os.Getenv(batchSizeEnv), 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+batchSizeEnv) - } else if batchSize%uint64(checkpoints.GetCheckpointFrequency()) != 0 { - return nil, fmt.Errorf( - "%s (%d) must be a multiple of checkpoint frequency (%d)", - batchSizeEnv, batchSize, checkpoints.GetCheckpointFrequency()) - } - - txmetaSourceUrl := os.Getenv(txmetaSourceUrlEnv) - if txmetaSourceUrl == "" { - return nil, errors.New("required parameter " + txmetaSourceUrlEnv) - } - - firstLedger := uint32(firstCheckpoint + batchSize*jobIndex) - lastLedger := firstLedger + uint32(batchSize) - 1 - return &BatchConfig{ - Range: historyarchive.Range{Low: firstLedger, High: lastLedger}, - TxMetaSourceUrl: txmetaSourceUrl, - IndexTargetUrl: fmt.Sprintf("%s%cjob_%d", indexTargetRootUrl, os.PathSeparator, jobIndex), - }, nil -} - -func main() { - log.SetLevel(log.InfoLevel) - // log.SetLevel(log.DebugLevel) - - batch, err := NewBatchConfig() - if err != nil { - panic(err) - } - - var workerCount int - workerCountStr := os.Getenv(workerCountEnv) - if workerCountStr == "" { - workerCount = runtime.NumCPU() - } else { - workerCountParsed, innerErr := strconv.ParseUint(workerCountStr, 10, 8) - if innerErr != nil { - panic(errors.Wrapf(innerErr, - "invalid worker count parameter (%s)", workerCountStr)) - } - workerCount = int(workerCountParsed) - } - - networkPassphrase := os.Getenv(networkPassphraseEnv) - switch networkPassphrase { - case "": - log.Warnf("%s not specified, defaulting to 'testnet'", networkPassphraseEnv) - fallthrough - case "testnet": - 
networkPassphrase = network.TestNetworkPassphrase - case "pubnet": - networkPassphrase = network.PublicNetworkPassphrase - default: - log.Warnf("%s is not a recognized shortcut ('pubnet' or 'testnet')", - networkPassphraseEnv) - } - log.Infof("Using network passphrase '%s'", networkPassphrase) - - parsedModules := []string{} - if modules := os.Getenv(modulesEnv); modules == "" { - parsedModules = append(parsedModules, "accounts_unbacked") - } else { - parsedModules = append(parsedModules, strings.Split(modules, ",")...) - } - - log.Infof("Uploading ledger range [%d, %d] to %s", - batch.Range.Low, batch.Range.High, batch.IndexTargetUrl) - - if _, err := index.BuildIndices( - context.Background(), - batch.TxMetaSourceUrl, - batch.IndexTargetUrl, - networkPassphrase, - batch.Range, - parsedModules, - workerCount, - ); err != nil { - panic(err) - } -} diff --git a/exp/lighthorizon/index/cmd/batch/reduce/main.go b/exp/lighthorizon/index/cmd/batch/reduce/main.go deleted file mode 100644 index bff9f8216a..0000000000 --- a/exp/lighthorizon/index/cmd/batch/reduce/main.go +++ /dev/null @@ -1,389 +0,0 @@ -package main - -import ( - "encoding/hex" - "hash/fnv" - "os" - "strconv" - "strings" - "sync" - - "github.com/stellar/go/exp/lighthorizon/index" - types "github.com/stellar/go/exp/lighthorizon/index/types" - "github.com/stellar/go/support/collections/set" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" -) - -const ( - ACCOUNT_FLUSH_FREQUENCY = 200 - // arbitrary default, should we use runtime.NumCPU()? - DEFAULT_WORKER_COUNT = 2 -) - -type ReduceConfig struct { - JobIndex uint32 - MapJobCount uint32 - ReduceJobCount uint32 - IndexTarget string - IndexRootSource string - - Workers uint32 -} - -func ReduceConfigFromEnvironment() (*ReduceConfig, error) { - const ( - mapJobsEnv = "MAP_JOB_COUNT" - reduceJobsEnv = "REDUCE_JOB_COUNT" - workerCountEnv = "WORKER_COUNT" - jobIndexEnvName = "JOB_INDEX_ENV" - indexRootSourceEnv = "INDEX_SOURCE_ROOT" - indexTargetEnv = "INDEX_TARGET" - ) - - jobIndexEnv := strings.TrimSpace(os.Getenv(jobIndexEnvName)) - if jobIndexEnv == "" { - return nil, errors.New("env variable can't be empty " + jobIndexEnvName) - } - - jobIndex, err := strconv.ParseUint(strings.TrimSpace(os.Getenv(jobIndexEnv)), 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+jobIndexEnv) - } - mapJobCount, err := strconv.ParseUint(strings.TrimSpace(os.Getenv(mapJobsEnv)), 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+mapJobsEnv) - } - reduceJobCount, err := strconv.ParseUint(strings.TrimSpace(os.Getenv(reduceJobsEnv)), 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+reduceJobsEnv) - } - - workersStr := strings.TrimSpace(os.Getenv(workerCountEnv)) - if workersStr == "" { - workersStr = strconv.FormatUint(DEFAULT_WORKER_COUNT, 10) - } - workers, err := strconv.ParseUint(workersStr, 10, 32) - if err != nil { - return nil, errors.Wrap(err, "invalid parameter "+workerCountEnv) - } - - indexTarget := strings.TrimSpace(os.Getenv(indexTargetEnv)) - if indexTarget == "" { - return nil, errors.New("required parameter missing " + indexTargetEnv) - } - - indexRootSource := strings.TrimSpace(os.Getenv(indexRootSourceEnv)) - if indexRootSource == "" { - return nil, errors.New("required parameter missing " + indexRootSourceEnv) - } - - return &ReduceConfig{ - JobIndex: uint32(jobIndex), - MapJobCount: uint32(mapJobCount), - ReduceJobCount: uint32(reduceJobCount), - Workers: uint32(workers), - 
IndexTarget: indexTarget, - IndexRootSource: indexRootSource, - }, nil -} - -func main() { - log.SetLevel(log.InfoLevel) - - config, err := ReduceConfigFromEnvironment() - if err != nil { - panic(err) - } - - log.Infof("Connecting to %s", config.IndexTarget) - finalIndexStore, err := index.Connect(config.IndexTarget) - if err != nil { - panic(errors.Wrapf(err, "failed to connect to indices at %s", - config.IndexTarget)) - } - - if err := mergeAllIndices(finalIndexStore, config); err != nil { - panic(errors.Wrap(err, "failed to merge indices")) - } -} - -func mergeAllIndices(finalIndexStore index.Store, config *ReduceConfig) error { - doneAccounts := set.NewSafeSet[string](512) - for i := uint32(0); i < config.MapJobCount; i++ { - jobLogger := log.WithField("job", i) - - jobSubPath := "job_" + strconv.FormatUint(uint64(i), 10) - jobLogger.Infof("Connecting to URL %s, sub-path %s", config.IndexRootSource, jobSubPath) - outerJobStore, err := index.ConnectWithConfig(index.StoreConfig{ - URL: config.IndexRootSource, - URLSubPath: jobSubPath, - }) - - if err != nil { - return errors.Wrapf(err, "failed to connect to indices at %s, sub-path %s", config.IndexRootSource, jobSubPath) - } - - accounts, err := outerJobStore.ReadAccounts() - // TODO: in the final version this should be a critical error; for now just skip it - if os.IsNotExist(err) { - jobLogger.Errorf("accounts file not found (TODO!)") - continue - } else if err != nil { - return errors.Wrapf(err, "failed to read accounts for job %d", i) - } - - jobLogger.Infof("Processing %d accounts with %d workers", - len(accounts), config.Workers) - - workQueues := make([]chan string, config.Workers) - for i := range workQueues { - workQueues[i] = make(chan string, 1) - } - - for idx, queue := range workQueues { - go (func(index uint32, queue chan string) { - for _, account := range accounts { - // Account index already merged in the previous outer job? - if doneAccounts.Contains(account) { - continue - } - - // Account doesn't belong in this work queue? - if !config.shouldProcessAccount(account, index) { - continue - } - - queue <- account - } - - close(queue) - })(uint32(idx), queue) - } - - // TODO: errgroup.WithContext(ctx) - var wg sync.WaitGroup - wg.Add(int(config.Workers)) - for j := uint32(0); j < config.Workers; j++ { - go func(routineIndex uint32) { - defer wg.Done() - accountLog := jobLogger. - WithField("worker", routineIndex). - WithField("subservice", "accounts") - accountLog.Info("Started worker") - - var accountsProcessed, accountsSkipped uint64 - for account := range workQueues[routineIndex] { - accountLog = accountLog. - WithField("total", len(accounts)). - WithField("indexed", accountsProcessed). - WithField("skipped", accountsSkipped) - - accountLog.Debugf("Account: %s", account) - if (accountsProcessed+accountsSkipped)%97 == 0 { - accountLog.Infof("Processed %d/%d accounts", - accountsProcessed+accountsSkipped, len(accounts)) - } - - accountLog.Debugf("Reading index for account: %s", account) - - // First, open the "final merged indices" at the root level - // for this account. - mergedIndices, readErr := outerJobStore.Read(account) - - // TODO: in the final version this should be a critical error; for now just skip it - if os.IsNotExist(readErr) { - accountLog.Errorf("Account %s is unavailable - TODO fix", account) - continue - } else if readErr != nil { - panic(readErr) - } - - // Then, iterate through all of the job folders and merge - // indices from all jobs that touched this account.
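The per-account merge that follows is, at bottom, a union of checkpoint bitmaps: if one map job saw the account active near checkpoint 7 and another near checkpoint 9, the merged index reports both. A sketch with the package's index types (illustrative positions):

    a := &types.BitmapIndex{}
    a.SetActive(7) // as flushed by one map job

    b := &types.BitmapIndex{}
    b.SetActive(9) // as flushed by another map job

    if err := a.Merge(b); err != nil {
        panic(err)
    }
    // `a` now has both positions set, which is what mergeIndices below
    // does for every index name shared by the two stores.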
- for k := uint32(0); k < config.MapJobCount; k++ { - var jobErr error - - // FIXME: This could probably come from a pool. Every - // worker needs to have a connection to every index - // store, so there's no reason to re-open these for each - // inner loop. - innerJobSubPath := "job_" + strconv.FormatUint(uint64(k), 10) - innerJobStore, jobErr := index.ConnectWithConfig(index.StoreConfig{ - URL: config.IndexRootSource, - URLSubPath: innerJobSubPath, - }) - - if jobErr != nil { - accountLog.WithError(jobErr). - Errorf("Failed to open index at %s, sub-path %s", config.IndexRootSource, innerJobSubPath) - panic(jobErr) - } - - jobIndices, jobErr := innerJobStore.Read(account) - - // This job never touched this account; skip. - if os.IsNotExist(jobErr) { - continue - } else if jobErr != nil { - accountLog.WithError(jobErr). - Errorf("Failed to read index for %s", account) - panic(jobErr) - } - - if jobErr = mergeIndices(mergedIndices, jobIndices); jobErr != nil { - accountLog.WithError(jobErr). - Errorf("Merge failure for index at %s, sub-path %s", config.IndexRootSource, innerJobSubPath) - panic(jobErr) - } - } - - // Finally, save the merged index. - finalIndexStore.AddParticipantToIndexesNoBackend(account, mergedIndices) - - // Mark this account for other workers to ignore. - doneAccounts.Add(account) - accountsProcessed++ - accountLog = accountLog.WithField("processed", accountsProcessed) - - // Periodically flush to disk to save memory. - if accountsProcessed%ACCOUNT_FLUSH_FREQUENCY == 0 { - accountLog.Infof("Flushing indexed accounts.") - if flushErr := finalIndexStore.Flush(); flushErr != nil { - accountLog.WithError(flushErr).Errorf("Flush error.") - panic(flushErr) - } - } - } - - accountLog.Infof("Final account flush.") - if err = finalIndexStore.Flush(); err != nil { - accountLog.WithError(err).Errorf("Flush error.") - panic(err) - } - - // Merge the transaction indexes - // There's 256 files, (one for each first byte of the txn hash) - txLog := jobLogger. - WithField("worker", routineIndex). - WithField("subservice", "transactions") - - var prefixesProcessed, prefixesSkipped uint64 - for i := int(0x00); i <= 0xff; i++ { - b := byte(i) // can't loop over range bc overflow - if b%97 == 0 { - txLog.Infof("Processed %d/%d prefixes (%d skipped)", - prefixesProcessed, 0xff, prefixesSkipped) - } - - if !config.shouldProcessTx(b, routineIndex) { - prefixesSkipped++ - continue - } - - txLog = txLog. - WithField("indexed", prefixesProcessed). - WithField("skipped", prefixesSkipped) - - prefix := hex.EncodeToString([]byte{b}) - for k := uint32(0); k < config.MapJobCount; k++ { - var innerErr error - innerJobSubPath := "job_" + strconv.FormatUint(uint64(k), 10) - innerJobStore, innerErr := index.ConnectWithConfig(index.StoreConfig{ - URL: config.IndexRootSource, - URLSubPath: innerJobSubPath, - }) - - if innerErr != nil { - txLog.WithError(innerErr).Errorf("Failed to open index at %s, sub-path %s", config.IndexRootSource, innerJobSubPath) - panic(innerErr) - } - - innerTxnIndexes, innerErr := innerJobStore.ReadTransactions(prefix) - if os.IsNotExist(innerErr) { - continue - } else if innerErr != nil { - txLog.WithError(innerErr).Errorf("Error reading tx prefix %s", prefix) - panic(innerErr) - } - - if innerErr = finalIndexStore.MergeTransactions(prefix, innerTxnIndexes); innerErr != nil { - txLog.WithError(innerErr).Errorf("Error merging txs at prefix %s", prefix) - panic(innerErr) - } - } - - prefixesProcessed++ - } - - txLog = txLog. - WithField("indexed", prefixesProcessed). 
- WithField("skipped", prefixesSkipped) - - txLog.Infof("Final transaction flush...") - if err = finalIndexStore.Flush(); err != nil { - txLog.Errorf("Error flushing transactions: %v", err) - panic(err) - } - }(j) - } - - wg.Wait() - } - - return nil -} - -func (cfg *ReduceConfig) shouldProcessAccount(account string, routineIndex uint32) bool { - hash := fnv.New64a() - - // Docs state (https://pkg.go.dev/hash#Hash) that Write will never error. - hash.Write([]byte(account)) - digest := uint32(hash.Sum64()) // discard top 32 bits - - leftHalf := digest >> 16 - rightHalf := digest & 0x0000FFFF - - log.WithField("worker", routineIndex). - WithField("account", account). - Debugf("Hash: %d (left=%d, right=%d)", digest, leftHalf, rightHalf) - - // Because the digest is basically a random number (given a good hash - // function), its remainders w.r.t. the indices will distribute the work - // fairly (and deterministically). - return leftHalf%cfg.ReduceJobCount == cfg.JobIndex && - rightHalf%cfg.Workers == routineIndex -} - -func (cfg *ReduceConfig) shouldProcessTx(txPrefix byte, routineIndex uint32) bool { - hashLeft := uint32(txPrefix >> 4) - hashRight := uint32(txPrefix & 0x0F) - - // Because the transaction hash (and thus the first byte or "prefix") is a - // random value, its remainders w.r.t. the indices will distribute the work - // fairly (and deterministically). - return hashRight%cfg.ReduceJobCount == cfg.JobIndex && - hashLeft%cfg.Workers == routineIndex -} - -// For every index that exists in `dest`, finds the corresponding index in -// `source` and merges it into `dest`'s version. -func mergeIndices(dest, source map[string]*types.BitmapIndex) error { - for name, index := range dest { - // The source doesn't contain this particular index. - // - // This probably shouldn't happen, since during the Map step, there's no - // way to choose which indices you want, but, strictly-speaking, it's - // not an error, so we can just move on. - innerIndices, ok := source[name] - if !ok || innerIndices == nil { - continue - } - - if err := index.Merge(innerIndices); err != nil { - return errors.Wrapf(err, "failed to merge index for %s", name) - } - } - - return nil -} diff --git a/exp/lighthorizon/index/cmd/map.sh b/exp/lighthorizon/index/cmd/map.sh deleted file mode 100755 index 390370f2cb..0000000000 --- a/exp/lighthorizon/index/cmd/map.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash -# -# Breaks up the given ledger dumps into checkpoints and runs a map -# job on each one. However, it's the Golang side does validation that -# the map job resulted in the correct indices. -# - -# check parameters and their validity (types, existence, etc.) - -if [[ "$#" -ne "2" ]]; then - echo "Usage: $0 " - exit 1 -fi - -if [[ ! -d "$1" ]]; then - echo "Error: txmeta src ('$1') does not exist" - echo "Usage: $0 " - exit 1 -fi - -if [[ -z $BATCH_SIZE ]]; then - echo "BATCH_SIZE environmental variable required" - exit 1 -elif ! [[ $BATCH_SIZE =~ ^[0-9]+$ ]]; then - echo "BATCH_SIZE ('$BATCH_SIZE') must be an integer" - exit 1 -fi - -if [[ -z $FIRST_LEDGER || -z $LAST_LEDGER ]]; then - echo "FIRST_LEDGER and LAST_LEDGER environmental variables required" - exit 1 -elif ! [[ $FIRST_LEDGER =~ ^[0-9]+$ && $LAST_LEDGER =~ ^[0-9]+$ ]]; then - echo "FIRST_LEDGER ('$FIRST_LEDGER') and LAST_LEDGER ('$LAST_LEDGER') must be integers" - exit 1 -fi - -if [[ ! -d "$2" ]]; then - echo "Warning: index dest ('$2') does not exist, creating..." 
- mkdir -p $2 -fi - -# do work - -FIRST=$FIRST_LEDGER -LAST=$LAST_LEDGER -COUNT=$(($LAST-$FIRST+1)) -# batches = ceil(count / batch_size) -# formula is from https://stackoverflow.com/a/12536521 -BATCH_COUNT=$(( ($COUNT + $BATCH_SIZE - 1) / $BATCH_SIZE )) - -if [[ "$(((LAST + 1) % 64))" -ne "0" ]]; then - echo "LAST_LEDGER ($LAST_LEDGER) should be a checkpoint ledger" - exit 1 -fi - -echo " - start: $FIRST" -echo " - end: $LAST" -echo " - count: $COUNT ($BATCH_COUNT batches @ $BATCH_SIZE ledgers each)" - -go build -o ./map ./batch/map/... -if [[ "$?" -ne "0" ]]; then - echo "Build failed" - exit 1 -fi - -pids=( ) -for (( i=0; i < $BATCH_COUNT; i++ )) -do - echo -n "Creating map job $i... " - - NETWORK_PASSPHRASE='testnet' JOB_INDEX_ENV='AWS_BATCH_JOB_ARRAY_INDEX' MODULES='accounts_unbacked,transactions' \ - AWS_BATCH_JOB_ARRAY_INDEX=$i BATCH_SIZE=$BATCH_SIZE FIRST_CHECKPOINT=$FIRST \ - TXMETA_SOURCE=file://$1 INDEX_TARGET=file://$2 WORKER_COUNT=1 \ - ./map & - - echo "pid=$!" - pids+=($!) -done - -sleep $BATCH_COUNT - -# Check the status codes for all of the map processes. -for i in "${!pids[@]}"; do - pid=${pids[$i]} - echo -n "Checking job $i (pid=$pid)... " - if ! wait "$pid"; then - echo "failed" - exit 1 - else - echo "succeeded!" - fi -done - -rm ./map -echo "All jobs succeeded!" -exit 0 diff --git a/exp/lighthorizon/index/cmd/mapreduce_test.go b/exp/lighthorizon/index/cmd/mapreduce_test.go deleted file mode 100644 index db529fd8bc..0000000000 --- a/exp/lighthorizon/index/cmd/mapreduce_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package main_test - -import ( - "encoding/hex" - "fmt" - "io" - "net/url" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "testing" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/network" - "github.com/stellar/go/support/collections/maps" - "github.com/stellar/go/support/collections/set" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - batchSize = 128 -) - -func TestMap(t *testing.T) { - RunMapTest(t) -} - -func TestReduce(t *testing.T) { - // First, map the index files like we normally would. - startLedger, endLedger, jobRoot := RunMapTest(t) - batchCount := (endLedger - startLedger + batchSize) / batchSize // ceil(ledgerCount / batchSize) - - // Now that indices have been "map"ped, reduce them to a single store. - - indexTarget := filepath.Join(t.TempDir(), "final-indices") - reduceTestCmd := exec.Command("./reduce.sh", jobRoot, indexTarget) - t.Logf("Running %d reduce jobs: %s", batchCount, reduceTestCmd.String()) - stdout, err := reduceTestCmd.CombinedOutput() - t.Logf(string(stdout)) - require.NoError(t, err) - - // Then, build the *same* indices using the single-process tester. - - t.Logf("Building baseline for ledger range [%d, %d]", startLedger, endLedger) - hashes, participants := IndexLedgerRange(t, txmetaSource, startLedger, endLedger) - - // Finally, compare the two to make sure the reduce job did what it's - // supposed to do. 
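// A minimal sketch of the batch-count arithmetic used here and in map.sh
// above, assuming only positive integer operands: ceiling division avoids
// floating point via ceil(count/size) == (count + size - 1) / size.
//
//	ledgerCount := endLedger - startLedger + 1
//	batchCount := (ledgerCount + batchSize - 1) / batchSize
//	// e.g. 128 ledgers at batchSize = 128 give (128+127)/128 = 1 batch,
//	// while 129 ledgers give (129+127)/128 = 2 batches.
//
// This is equivalent to the (endLedger - startLedger + batchSize) / batchSize
// form used in these tests, since endLedger - startLedger == ledgerCount - 1.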
-
-	indexStore, err := index.Connect("file://" + indexTarget)
-	require.NoError(t, err)
-	stores := []index.Store{indexStore} // to reuse code: same as an array of one store
-
-	assertParticipantsEqual(t, maps.Keys(participants), stores)
-	for account, checkpoints := range participants {
-		assertParticipantCheckpointsEqual(t, account, checkpoints, stores)
-	}
-
-	assertTOIDsEqual(t, hashes, stores)
-}
-
-func RunMapTest(t *testing.T) (uint32, uint32, string) {
-	// Only file:// style URLs for the txmeta source are allowed while testing.
-	parsed, err := url.Parse(txmetaSource)
-	require.NoErrorf(t, err, "%s is not a valid URL", txmetaSource)
-	if parsed.Scheme != "file" {
-		t.Logf("%s is not a local txmeta source", txmetaSource)
-		t.Skip()
-	}
-	txmetaPath := strings.Replace(txmetaSource, "file://", "", 1)
-
-	// What ledger range are we working with?
-	checkpointMgr := historyarchive.NewCheckpointManager(0)
-	startLedger, endLedger := GetFixtureLedgerRange(t)
-
-	// The map job *requires* that each one operate on a multiple of a
-	// checkpoint range, so we may need to adjust the ranges (depending on how
-	// many ledgers are in the fixture) and break them up accordingly.
-	if !checkpointMgr.IsCheckpoint(startLedger - 1) {
-		startLedger = checkpointMgr.NextCheckpoint(startLedger-1) + 1
-	}
-	if (endLedger-startLedger)%batchSize != 0 {
-		endLedger = checkpointMgr.PrevCheckpoint((endLedger / batchSize) * batchSize)
-	}
-
-	require.Greaterf(t, endLedger, startLedger,
-		"not enough fixtures for batchSize=%d", batchSize)
-
-	batchCount := (endLedger - startLedger + batchSize) / batchSize // ceil(ledgerCount / batchSize)
-
-	t.Logf("Using %d batches to process ledger range [%d, %d]",
-		batchCount, startLedger, endLedger)
-
-	require.Truef(t,
-		batchCount == 1 || checkpointMgr.IsCheckpoint(startLedger+batchSize-1),
-		"expected batch size (%d) to result in checkpoint blocks, "+
-			"but start+batchSize-1 (%d+%d-1=%d) is not a checkpoint",
-		batchSize, startLedger, batchSize, startLedger+batchSize-1)
-
-	// First, execute the map jobs in parallel and dump the resulting indices to
-	// a temporary directory.
-
-	tempDir := filepath.Join(t.TempDir(), "indices-map")
-	mapTestCmd := exec.Command("./map.sh", txmetaPath, tempDir)
-	mapTestCmd.Env = append(os.Environ(),
-		fmt.Sprintf("BATCH_SIZE=%d", batchSize),
-		fmt.Sprintf("FIRST_LEDGER=%d", startLedger),
-		fmt.Sprintf("LAST_LEDGER=%d", endLedger),
-		fmt.Sprintf("NETWORK_PASSPHRASE='%s'", network.TestNetworkPassphrase))
-	t.Logf("Running %d map jobs: %s", batchCount, mapTestCmd.String())
-	stdout, err := mapTestCmd.CombinedOutput()
-
-	t.Logf("Tried writing indices to %s:", tempDir)
-	t.Log(string(stdout))
-	require.NoError(t, err)
-
-	// Then, build the *same* indices using the single-process tester.
-	t.Logf("Building baseline for ledger range [%d, %d]", startLedger, endLedger)
-	hashes, participants := IndexLedgerRange(t, txmetaSource, startLedger, endLedger)
-
-	// Now, walk through the mapped indices and ensure that at least one of the
-	// jobs reported the same indices for tx TOIDs and participation.
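// As a reference for the TOID comparisons below, a minimal sketch of the
// packing the toid package implements, per Horizon's total-order ID layout
// (32 bits of ledger sequence, 20 bits of transaction order, 12 bits of
// operation order); packTOID is a hypothetical helper for illustration only:
//
//	func packTOID(ledgerSeq, txOrder, opOrder int32) int64 {
//		return int64(ledgerSeq)<<32 | int64(txOrder)<<12 | int64(opOrder)
//	}
//
// The baseline built by IndexLedgerRange stores toid.New(ledger, txIndex, 0)
// per transaction hash, i.e. opOrder = 0.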
- - stores := make([]index.Store, batchCount) - for i := range stores { - indexUrl := filepath.Join( - "file://", - tempDir, - "job_"+strconv.FormatUint(uint64(i), 10), - ) - index, err := index.Connect(indexUrl) - require.NoError(t, err) - require.NotNil(t, index) - stores[i] = index - - t.Logf("Connected to index #%d at %s", i+1, indexUrl) - } - - assertParticipantsEqual(t, maps.Keys(participants), stores) - for account, checkpoints := range participants { - assertParticipantCheckpointsEqual(t, account, checkpoints, stores) - } - - assertTOIDsEqual(t, hashes, stores) - - return startLedger, endLedger, tempDir -} - -func assertParticipantsEqual(t *testing.T, - expectedAccountSet []string, - indexGroup []index.Store, -) { - indexGroupAccountSet := set.NewSet[string](len(expectedAccountSet)) - for _, store := range indexGroup { - accounts, err := store.ReadAccounts() - require.NoError(t, err) - indexGroupAccountSet.AddSlice(accounts) - } - - assert.Lenf(t, indexGroupAccountSet, len(expectedAccountSet), - "quantity of accounts across indices doesn't match") - - mappedAccountSet := maps.Keys(indexGroupAccountSet) - require.ElementsMatch(t, expectedAccountSet, mappedAccountSet) -} - -func assertParticipantCheckpointsEqual(t *testing.T, - account string, - expected []uint32, - indexGroup []index.Store, -) { - // Ensure that all of the active checkpoints reported by the index match - // the ones we tracked while ingesting the range ourselves. - - foundCheckpoints := set.NewSet[uint32](len(expected)) - for _, store := range indexGroup { - var err error - var lastActiveCheckpoint uint32 = 0 - for { - lastActiveCheckpoint, err = store.NextActive(account, "all/all", lastActiveCheckpoint) - if err == io.EOF { - break - } - require.NoError(t, err) // still an error since it shouldn't happen - - foundCheckpoints.Add(lastActiveCheckpoint) - lastActiveCheckpoint += 1 // hit next active one - } - } - - // Error out if there were any extraneous checkpoints found. - for chk := range foundCheckpoints { - require.Containsf(t, expected, chk, - "found unexpected checkpoint %d", int(chk)) - } - - // Make sure everything got marked as expected in at least one index. - for _, item := range expected { - require.Containsf(t, foundCheckpoints, item, - "failed to find %d for %s (found %v)", - int(item), account, foundCheckpoints) - } -} - -func assertTOIDsEqual(t *testing.T, toids map[string]int64, stores []index.Store) { - for hash, toid := range toids { - rawHash := [32]byte{} - decodedHash, err := hex.DecodeString(hash) - require.NoError(t, err) - require.Lenf(t, decodedHash, 32, "invalid tx hash length") - copy(rawHash[:], decodedHash) - - found := false - for i, store := range stores { - storeToid, err := store.TransactionTOID(rawHash) - if err != nil { - require.ErrorIsf(t, err, io.EOF, - "only EOF errors are allowed (store %d, hash %s)", i, hash) - } else { - require.Equalf(t, toid, storeToid, - "TOIDs for tx 0x%s don't match (store %d)", hash, i) - found = true - } - } - - require.Truef(t, found, "TOID for tx 0x%s not found in stores", hash) - } -} diff --git a/exp/lighthorizon/index/cmd/reduce.sh b/exp/lighthorizon/index/cmd/reduce.sh deleted file mode 100755 index 1cfbca0ccc..0000000000 --- a/exp/lighthorizon/index/cmd/reduce.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env bash -# -# Combines indices that were built separately in different folders into a single -# set of indices. 
-#
-# This focuses on starting parallel processes, but the Golang side does
-# validation that the reduce jobs resulted in the correct indices.
-#
-
-# check parameters and their validity (types, existence, etc.)
-
-if [[ "$#" -ne "2" ]]; then
-    echo "Usage: $0 <index src root> <index dest>"
-    exit 1
-fi
-
-if [[ ! -d "$1" ]]; then
-    echo "Error: index src root ('$1') does not exist"
-    echo "Usage: $0 <index src root> <index dest>"
-    exit 1
-fi
-
-if [[ ! -d "$2" ]]; then
-    echo "Warning: index dest ('$2') does not exist, creating..."
-    mkdir -p "$2"
-fi
-
-MAP_JOB_COUNT=$(ls $1 | grep -E 'job_[0-9]+' | wc -l)
-if [[ "$MAP_JOB_COUNT" -le "0" ]]; then
-    echo "No jobs in index src root ('$1') found."
-    exit 1
-fi
-REDUCE_JOB_COUNT=$MAP_JOB_COUNT
-
-# build reduce program and start it up
-
-go build -o reduce ./batch/reduce/...
-if [[ "$?" -ne "0" ]]; then
-    echo "Build failed"
-    exit 1
-fi
-
-echo "Coalescing $MAP_JOB_COUNT discovered job outputs from $1 into $2..."
-
-pids=( )
-for (( i=0; i < $REDUCE_JOB_COUNT; i++ ))
-do
-    echo -n "Creating reduce job $i... "
-
-    AWS_BATCH_JOB_ARRAY_INDEX=$i JOB_INDEX_ENV="AWS_BATCH_JOB_ARRAY_INDEX" MAP_JOB_COUNT=$MAP_JOB_COUNT \
-    REDUCE_JOB_COUNT=$REDUCE_JOB_COUNT WORKER_COUNT=4 \
-    INDEX_SOURCE_ROOT=file://$1 INDEX_TARGET=file://$2 \
-    timeout -k 30s 10s ./reduce &
-
-    echo "pid=$!"
-    pids+=($!)
-done
-
-sleep $REDUCE_JOB_COUNT
-
-# Check the status codes for all of the reduce processes.
-for i in "${!pids[@]}"; do
-    pid=${pids[$i]}
-    echo -n "Checking job $i (pid=$pid)... "
-    if ! wait "$pid"; then
-        echo "failed"
-        exit 1
-    else
-        echo "succeeded!"
-    fi
-done
-
-rm ./reduce # cleanup
-echo "All jobs succeeded!"
-exit 0
diff --git a/exp/lighthorizon/index/cmd/single/main.go b/exp/lighthorizon/index/cmd/single/main.go
deleted file mode 100644
index 7661b160dc..0000000000
--- a/exp/lighthorizon/index/cmd/single/main.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package main
-
-import (
-	"context"
-	"flag"
-	"runtime"
-	"strings"
-
-	"github.com/stellar/go/exp/lighthorizon/index"
-	"github.com/stellar/go/historyarchive"
-	"github.com/stellar/go/network"
-	"github.com/stellar/go/support/log"
-)
-
-func main() {
-	sourceUrl := flag.String("source", "gcs://horizon-archive-poc", "history archive url to read txmeta files")
-	targetUrl := flag.String("target", "file://indexes", "where to write indexes")
-	networkPassphrase := flag.String("network-passphrase", network.TestNetworkPassphrase, "network passphrase")
-	start := flag.Int("start", 2, "ledger to start at (inclusive, default: 2, the earliest)")
-	end := flag.Int("end", 0, "ledger to end at (inclusive, default: 0, the latest as of start time)")
-	modules := flag.String("modules", "accounts,transactions", "comma-separated list of modules to index (default: all)")
-	watch := flag.Bool("watch", false, "whether to watch the `source` for new "+
-		"txmeta files and index them (default: false). 
"+ - "note: `-watch` implies a continuous `-end 0` to get to the latest ledger in txmeta files") - workerCount := flag.Int("workers", runtime.NumCPU()-1, "number of workers (default: # of CPUs - 1)") - - flag.Parse() - log.SetLevel(log.InfoLevel) - // log.SetLevel(log.DebugLevel) - - builder, err := index.BuildIndices( - context.Background(), - *sourceUrl, - *targetUrl, - *networkPassphrase, - historyarchive.Range{ - Low: uint32(max(*start, 2)), - High: uint32(*end), - }, - strings.Split(*modules, ","), - *workerCount, - ) - if err != nil { - panic(err) - } - - if *watch { - if err := builder.Watch(context.Background()); err != nil { - panic(err) - } - } -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/exp/lighthorizon/index/cmd/single_test.go b/exp/lighthorizon/index/cmd/single_test.go deleted file mode 100644 index 58620d2ef9..0000000000 --- a/exp/lighthorizon/index/cmd/single_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package main_test - -import ( - "context" - "encoding/hex" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - "testing" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/ingest" - "github.com/stellar/go/ingest/ledgerbackend" - "github.com/stellar/go/metaarchive" - "github.com/stellar/go/network" - "github.com/stellar/go/support/storage" - "github.com/stellar/go/toid" - "github.com/stretchr/testify/require" - - "github.com/stellar/go/exp/lighthorizon/index" -) - -const ( - txmetaSource = "file://./testdata/" -) - -/** - * There are three parts to testing this correctly: - * - test that single-process indexing works - * - test that single-process w/ multi-worker works - * - test map-reduce against the single-process results - * - * Therefore, if any of these fail, the subsequent ones are unreliable. - */ - -func TestSingleProcess(tt *testing.T) { - eldestLedger, latestLedger := GetFixtureLedgerRange(tt) - checkpoints := historyarchive.NewCheckpointManager(0) - - // We want two test variations: - // - starting at the first ledger in a checkpoint range - // - starting at an arbitrary ledger - // - // To do this, we adjust the known set of fixture ledgers we have. 
-	var eldestCheckpointLedger uint32
-	if checkpoints.IsCheckpoint(eldestLedger - 1) {
-		eldestCheckpointLedger = eldestLedger // first in range
-		eldestLedger += 5                     // somewhere in the "middle"
-	} else {
-		eldestCheckpointLedger = checkpoints.NextCheckpoint(eldestLedger-1) + 1
-		eldestLedger++
-	}
-
-	tt.Run("start-at-checkpoint", func(t *testing.T) {
-		testSingleProcess(t, historyarchive.Range{
-			Low:  eldestCheckpointLedger,
-			High: latestLedger,
-		})
-	})
-
-	tt.Run("start-at-ledger", func(t *testing.T) {
-		testSingleProcess(t, historyarchive.Range{
-			Low:  eldestLedger,
-			High: latestLedger,
-		})
-	})
-}
-
-func testSingleProcess(t *testing.T, ledgerRange historyarchive.Range) {
-	var (
-		firstLedger = ledgerRange.Low
-		lastLedger  = ledgerRange.High
-		ledgerCount = ledgerRange.High - ledgerRange.Low + 1
-	)
-
-	t.Logf("Validating single-process builder on ledger range [%d, %d] (%d ledgers)",
-		firstLedger, lastLedger, ledgerCount)
-
-	workerCount := 4
-	tmpDir := filepath.Join("file://", t.TempDir())
-	t.Logf("Storing indices in %s", tmpDir)
-
-	ctx := context.Background()
-	_, err := index.BuildIndices(
-		ctx,
-		txmetaSource,
-		tmpDir,
-		network.TestNetworkPassphrase,
-		historyarchive.Range{Low: firstLedger, High: lastLedger},
-		[]string{
-			"accounts",
-			"transactions",
-		},
-		workerCount,
-	)
-	require.NoError(t, err)
-
-	hashes, participants := IndexLedgerRange(t, txmetaSource, firstLedger, lastLedger)
-
-	store, err := index.Connect(tmpDir)
-	require.NoError(t, err)
-	require.NotNil(t, store)
-
-	// Ensure the participants reported by the index and the ones we
-	// tracked while ingesting the ledger range match.
-	AssertParticipantsEqual(t, participants, store)
-
-	// Ensure the transactions reported by the index match the ones
-	// tracked when ingesting the ledger range ourselves.
-	AssertTxsEqual(t, hashes, store)
-}
-
-func AssertTxsEqual(t *testing.T, expected map[string]int64, actual index.Store) {
-	for hash, knownTOID := range expected {
-		rawHash, err := hex.DecodeString(hash)
-		require.NoError(t, err, "bug")
-		require.Len(t, rawHash, 32)
-
-		tempBuf := [32]byte{}
-		copy(tempBuf[:], rawHash[:])
-
-		rawTOID, err := actual.TransactionTOID(tempBuf)
-		require.NoErrorf(t, err, "expected TOID for tx hash %s", hash)
-
-		require.Equalf(t, knownTOID, rawTOID,
-			"expected TOID %v, got %v",
-			toid.Parse(knownTOID), toid.Parse(rawTOID))
-	}
-}
-
-func AssertParticipantsEqual(t *testing.T, expected map[string][]uint32, actual index.Store) {
-	accounts, err := actual.ReadAccounts()
-
-	require.NoError(t, err)
-	require.Len(t, accounts, len(expected))
-	for account := range expected {
-		require.Contains(t, accounts, account)
-	}
-
-	for account, knownCheckpoints := range expected {
-		// Ensure that the "everything" index exists for the account.
-		index, err := actual.Read(account)
-		require.NoError(t, err)
-		require.Contains(t, index, "all/all")
-
-		// Ensure that all of the active checkpoints reported by the index
-		// match the ones we tracked while ingesting the range ourselves.
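// A note on the cursor pattern below (inferred from how NextActive is used
// throughout this diff rather than from separate documentation): given a
// checkpoint c, NextActive(account, "all/all", c) returns the first active
// checkpoint >= c for that account's "all/all" index, or io.EOF once none
// remain, so resuming from lastActiveCheckpoint+1 visits every active
// checkpoint exactly once, in ascending order.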
- activeCheckpoints := []uint32{} - lastActiveCheckpoint := uint32(0) - for { - lastActiveCheckpoint, err = actual.NextActive(account, "all/all", lastActiveCheckpoint) - if err == io.EOF { - break - } - require.NoError(t, err) - - activeCheckpoints = append(activeCheckpoints, lastActiveCheckpoint) - lastActiveCheckpoint += 1 // hit next active one - } - - require.Equalf(t, knownCheckpoints, activeCheckpoints, - "incorrect checkpoints for %s", account) - } -} - -// IndexLedgerRange will connect to a dump of ledger txmeta for the given ledger -// range and build two maps from scratch (i.e. without using the indexer) by -// ingesting them manually: -// -// - a map of tx hashes to TOIDs -// - a map of accounts to a list of checkpoints they were active in -// -// These should be used as a baseline comparison of the indexer, ensuring that -// all of the data is identical. -func IndexLedgerRange( - t *testing.T, - txmetaSource string, - startLedger, endLedger uint32, // inclusive -) ( - map[string]int64, // map of "tx hash": TOID - map[string][]uint32, // map of "account": {checkpoint, checkpoint, ...} -) { - ctx := context.Background() - backend, err := historyarchive.ConnectBackend( - txmetaSource, - storage.ConnectOptions{ - Context: ctx, - S3Region: "us-east-1", - }, - ) - require.NoError(t, err) - - metaArchive := metaarchive.NewMetaArchive(backend) - - ledgerBackend := ledgerbackend.NewHistoryArchiveBackend(metaArchive) - defer ledgerBackend.Close() - - participation := make(map[string][]uint32) - hashes := make(map[string]int64) - - for ledgerSeq := startLedger; ledgerSeq <= endLedger; ledgerSeq++ { - ledger, err := ledgerBackend.GetLedger(ctx, uint32(ledgerSeq)) - require.NoError(t, err) - require.EqualValues(t, ledgerSeq, ledger.LedgerSequence()) - - reader, err := ingest.NewLedgerTransactionReaderFromLedgerCloseMeta( - network.TestNetworkPassphrase, ledger) - require.NoError(t, err) - - for { - tx, err := reader.Read() - if err == io.EOF { - break - } - require.NoError(t, err) - - participants, err := index.GetTransactionParticipants(tx) - require.NoError(t, err) - - for _, participant := range participants { - checkpoint := index.GetCheckpointNumber(ledgerSeq) - - // Track the checkpoint in which activity occurred, keeping the - // list duplicate-free. - if list, ok := participation[participant]; ok { - if list[len(list)-1] != checkpoint { - participation[participant] = append(list, checkpoint) - } - } else { - participation[participant] = []uint32{checkpoint} - } - } - - // Track the ledger sequence in which every tx occurred. - hash := hex.EncodeToString(tx.Result.TransactionHash[:]) - hashes[hash] = toid.New( - int32(ledger.LedgerSequence()), - int32(tx.Index), - 0, - ).ToInt64() - } - } - - return hashes, participation -} - -// GetFixtureLedgerRange determines the oldest and latest ledgers w/in the -// fixture data. 
It's *essentially* equivalent to (but better than, since it -// handles the existence of non-integer files): -// -// LOW=$(ls $txmetaSource/ledgers | sort -n | head -n1) -// HIGH=$(ls $txmetaSource/ledgers | sort -n | tail -n1) -func GetFixtureLedgerRange(t *testing.T) (low uint32, high uint32) { - txmetaSourceDir := strings.Replace( - txmetaSource, - "file://", "", - 1) - files, err := ioutil.ReadDir(filepath.Join(txmetaSourceDir, "ledgers")) - require.NoError(t, err) - - for _, file := range files { - ledgerNum, innerErr := strconv.ParseUint(file.Name(), 10, 32) - if innerErr != nil { // non-integer filename - continue - } - - ledger := uint32(ledgerNum) - if ledger < low || low == 0 { - low = ledger - } - if ledger > high || high == 0 { - high = ledger - } - } - - return low, high -} diff --git a/exp/lighthorizon/index/cmd/testdata/latest b/exp/lighthorizon/index/cmd/testdata/latest deleted file mode 100644 index 9f53cd22d0..0000000000 --- a/exp/lighthorizon/index/cmd/testdata/latest +++ /dev/null @@ -1 +0,0 @@ -1410367 \ No newline at end of file diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410048 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410048 deleted file mode 100644 index 6eb8fee0ce..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410048 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410049 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410049 deleted file mode 100644 index e253f8658a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410049 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410050 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410050 deleted file mode 100644 index e4e5598abe..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410050 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410051 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410051 deleted file mode 100644 index f5735aab0a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410051 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410052 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410052 deleted file mode 100644 index 2aa528231a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410052 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410053 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410053 deleted file mode 100644 index 25b592c2ea..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410053 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410054 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410054 deleted file mode 100644 index 6515d892d6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410054 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410055 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410055 deleted file mode 100644 index 818646f4c3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410055 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410056 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410056 deleted file mode 100644 index 728fcd2b22..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410056 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410057 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410057 deleted file mode 
100644 index 2ffa35e1d0..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410057 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410058 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410058 deleted file mode 100644 index 159d768154..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410058 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410059 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410059 deleted file mode 100644 index 7911dde3ff..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410059 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410060 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410060 deleted file mode 100644 index dd9fc4b93c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410060 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410061 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410061 deleted file mode 100644 index bbd1823295..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410061 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410062 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410062 deleted file mode 100644 index 9b942201c7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410062 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410063 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410063 deleted file mode 100644 index 2ff80dfa66..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410063 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410064 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410064 deleted file mode 100644 index b33caa50d4..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410064 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410065 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410065 deleted file mode 100644 index 13b942c6a2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410065 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410066 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410066 deleted file mode 100644 index a39f6857dc..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410066 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410067 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410067 deleted file mode 100644 index f093ddb040..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410067 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410068 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410068 deleted file mode 100644 index 7329c0c54e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410068 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410069 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410069 deleted file mode 100644 index 5b4dad0b87..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410069 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410070 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410070 deleted file mode 100644 index a8088c96aa..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410070 and /dev/null differ diff --git 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410071 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410071 deleted file mode 100644 index 550692bf69..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410071 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410072 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410072 deleted file mode 100644 index 73d1d63c7e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410072 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410073 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410073 deleted file mode 100644 index 12c2708a17..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410073 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410074 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410074 deleted file mode 100644 index 9c5711b623..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410074 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410075 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410075 deleted file mode 100644 index 4e376dd57d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410075 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410076 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410076 deleted file mode 100644 index cf94b43e9a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410076 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410077 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410077 deleted file mode 100644 index 3a04690232..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410077 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410078 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410078 deleted file mode 100644 index 72dd66b709..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410078 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410079 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410079 deleted file mode 100644 index ff584b34d8..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410079 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410080 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410080 deleted file mode 100644 index 223d95b003..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410080 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410081 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410081 deleted file mode 100644 index fbd441566e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410081 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410082 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410082 deleted file mode 100644 index 71ae40a074..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410082 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410083 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410083 deleted file mode 100644 index 94eba6b64d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410083 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410084 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410084 deleted file mode 100644 index 55e0a61356..0000000000 
Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410084 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410085 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410085 deleted file mode 100644 index c5b81a78f2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410085 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410086 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410086 deleted file mode 100644 index 53663361c2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410086 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410087 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410087 deleted file mode 100644 index 292b4c2d43..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410087 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410088 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410088 deleted file mode 100644 index 388d4569e7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410088 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410089 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410089 deleted file mode 100644 index 401d806c3a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410089 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410090 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410090 deleted file mode 100644 index 86779c9e3a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410090 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410091 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410091 deleted file mode 100644 index d35f923389..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410091 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410092 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410092 deleted file mode 100644 index 0f5edecee7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410092 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410093 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410093 deleted file mode 100644 index 0fd9c17e7b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410093 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410094 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410094 deleted file mode 100644 index 7e05d0f9bb..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410094 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410095 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410095 deleted file mode 100644 index a0b1899f2c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410095 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410096 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410096 deleted file mode 100644 index 1ce5bdbd05..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410096 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410097 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410097 deleted file mode 100644 index 36c7b0d4e7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410097 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410098 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410098 deleted file mode 100644 index a05bdbacfc..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410098 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410099 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410099 deleted file mode 100644 index 4779b09f8b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410099 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410100 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410100 deleted file mode 100644 index f4be81c033..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410100 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410101 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410101 deleted file mode 100644 index af0a916bdf..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410101 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410102 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410102 deleted file mode 100644 index c56cfdd6ad..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410102 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410103 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410103 deleted file mode 100644 index fe975c2971..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410103 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410104 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410104 deleted file mode 100644 index 3a1c6ff7b3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410104 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410105 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410105 deleted file mode 100644 index 8362ded3c7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410105 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410106 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410106 deleted file mode 100644 index 4006a603f1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410106 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410107 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410107 deleted file mode 100644 index ed79f027a6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410107 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410108 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410108 deleted file mode 100644 index 16f2f5f6da..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410108 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410109 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410109 deleted file mode 100644 index 20e756d66d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410109 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410110 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410110 deleted file mode 100644 index e9ded78c9b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410110 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410111 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410111 deleted file mode 100644 index b52d265c3a..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410111 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410112 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410112 deleted file mode 100644 index 18b155faf9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410112 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410113 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410113 deleted file mode 100644 index 13421a93a1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410113 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410114 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410114 deleted file mode 100644 index 34bdd0a171..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410114 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410115 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410115 deleted file mode 100644 index 579af24555..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410115 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410116 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410116 deleted file mode 100644 index 36387fe2ee..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410116 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410117 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410117 deleted file mode 100644 index db5ec06fd7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410117 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410118 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410118 deleted file mode 100644 index 5d5234d0a0..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410118 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410119 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410119 deleted file mode 100644 index b4da038696..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410119 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410120 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410120 deleted file mode 100644 index 892ef45d70..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410120 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410121 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410121 deleted file mode 100644 index 48e472d685..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410121 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410122 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410122 deleted file mode 100644 index 17c1509e02..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410122 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410123 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410123 deleted file mode 100644 index ccf93278b9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410123 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410124 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410124 deleted file mode 100644 index 0e792ff9b3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410124 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410125 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410125 deleted file mode 100644 index b66e16e80f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410125 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410126 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410126 deleted file mode 100644 index 67f1d1a7a5..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410126 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410127 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410127 deleted file mode 100644 index e8e1d22bc5..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410127 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410128 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410128 deleted file mode 100644 index 17a0790a93..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410128 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410129 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410129 deleted file mode 100644 index c942106cf9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410129 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410130 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410130 deleted file mode 100644 index c90e7f44d4..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410130 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410131 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410131 deleted file mode 100644 index 2bd09182e8..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410131 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410132 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410132 deleted file mode 100644 index 09032447ac..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410132 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410133 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410133 deleted file mode 100644 index 6b6bafbb87..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410133 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410134 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410134 deleted file mode 100644 index 7e60093de2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410134 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410135 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410135 deleted file mode 100644 index 3106790205..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410135 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410136 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410136 deleted file mode 100644 index f54a0d0366..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410136 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410137 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410137 deleted file mode 100644 index c7094c2187..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410137 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410138 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410138 deleted file mode 100644 index d49827bbcf..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410138 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410139 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410139 deleted file mode 100644 index f7177934d2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410139 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410140 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410140 deleted file mode 100644 index d08b2214ab..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410140 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410141 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410141 deleted file mode 100644 index 71202b8754..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410141 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410142 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410142 deleted file mode 100644 index 2add5746e7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410142 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410143 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410143 deleted file mode 100644 index 1291873dc6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410143 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410144 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410144 deleted file mode 100644 index 75866b93db..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410144 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410145 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410145 deleted file mode 100644 index 842d800c67..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410145 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410146 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410146 deleted file mode 100644 index a597ff12ba..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410146 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410147 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410147 deleted file mode 100644 index 1d6545455d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410147 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410148 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410148 deleted file mode 100644 index 4e22f6aa65..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410148 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410149 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410149 deleted file mode 100644 index 7e28da7838..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410149 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410150 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410150 deleted file mode 100644 index a2887e89bd..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410150 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410151 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410151 deleted file mode 100644 index 1af403eef2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410151 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410152 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410152 deleted file mode 100644 index 129cc967a3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410152 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410153 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410153 deleted file mode 100644 index 90fb6ebf05..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410153 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410154 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410154 deleted file mode 100644 index b873382d74..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410154 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410155 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410155 deleted file mode 100644 index 52c73b5891..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410155 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410156 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410156 deleted file mode 100644 index 9679d9e283..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410156 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410157 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410157 deleted file mode 100644 index 96b3cdc13b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410157 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410158 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410158 deleted file mode 100644 index e988977554..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410158 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410159 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410159 deleted file mode 100644 index 4f3d39eb00..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410159 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410160 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410160 deleted file mode 100644 index 048e5e4c93..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410160 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410161 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410161 deleted file mode 100644 index 0904b1bce9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410161 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410162 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410162 deleted file mode 100644 index a8a09edf7b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410162 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410163 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410163 deleted file mode 100644 index 67783f0c09..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410163 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410164 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410164 deleted file mode 100644 index df9e2a10c3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410164 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410165 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410165 deleted file mode 100644 index e4143ace56..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410165 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410166 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410166 deleted file mode 100644 index 34ff178c0a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410166 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410167 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410167 deleted file mode 100644 index 4dd50c08cc..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410167 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410168 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410168 deleted file mode 100644 index de80fde445..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410168 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410169 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410169 deleted file mode 100644 index 3f8de9184e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410169 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410170 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410170 deleted file mode 100644 index bc33e9d5e4..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410170 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410171 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410171 deleted file mode 100644 index 996be09a53..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410171 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410172 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410172 deleted file mode 100644 index 98d8c4e6a5..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410172 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410173 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410173 deleted file mode 100644 index bed9cbede3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410173 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410174 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410174 deleted file mode 100644 index 395f0a84e3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410174 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410175 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410175 deleted file mode 100644 index a4317fb049..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410175 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410176 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410176 deleted file mode 100644 index b0e4cf4dee..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410176 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410177 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410177 deleted file mode 100644 index 794bec0f8a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410177 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410178 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410178 deleted file mode 100644 index 16fc675406..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410178 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410179 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410179 deleted file mode 100644 index 91f91e0338..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410179 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410180 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410180 deleted file mode 100644 index fcab816100..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410180 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410181 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410181 deleted file mode 100644 index e9e3eb84c8..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410181 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410182 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410182 deleted file mode 100644 index dcc1682f9e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410182 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410183 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410183 deleted file mode 100644 index c1fcb6af03..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410183 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410184 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410184 deleted file mode 100644 index 7e9c01a01d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410184 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410185 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410185 deleted file mode 100644 index 0fbabf1e04..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410185 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410186 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410186 deleted file mode 100644 index 4c59920abd..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410186 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410187 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410187 deleted file mode 100644 index f2df1e2779..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410187 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410188 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410188 deleted file mode 100644 index 0d89d94c04..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410188 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410189 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410189 deleted file mode 100644 index d5fcee359b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410189 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410190 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410190 deleted file mode 100644 index 9065f477a9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410190 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410191 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410191 deleted file mode 100644 index 39b6cb5623..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410191 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410192 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410192 deleted file mode 100644 index e6d5af2aa5..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410192 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410193 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410193 deleted file mode 100644 index 02cbfec4d7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410193 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410194 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410194 deleted file mode 100644 index 3c3da42cdb..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410194 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410195 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410195 deleted file mode 100644 index f683478e9b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410195 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410196 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410196 deleted file mode 100644 index 5f404193f9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410196 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410197 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410197 deleted file mode 100644 index 099d5339cd..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410197 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410198 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410198 deleted file mode 100644 index d0ebc73fab..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410198 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410199 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410199 deleted file mode 100644 index 092fec08aa..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410199 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410200 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410200 deleted file mode 100644 index 336b92a9f0..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410200 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410201 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410201 deleted file mode 100644 index f1aba9d150..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410201 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410202 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410202 deleted file mode 100644 index 218d2132ff..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410202 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410203 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410203 deleted file mode 100644 index 5cfc3c6133..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410203 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410204 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410204 deleted file mode 100644 index ab8afac47c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410204 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410205 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410205 deleted file mode 100644 index 23ff2c2ec1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410205 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410206 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410206 deleted file mode 100644 index cd1d9c6e8b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410206 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410207 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410207 deleted file mode 100644 index e0ccae2016..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410207 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410208 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410208 deleted file mode 100644 index cf0ee7f8ac..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410208 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410209 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410209 deleted file mode 100644 index 57eaacdd4c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410209 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410210 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410210 deleted file mode 100644 index 5fe01f0630..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410210 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410211 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410211 deleted file mode 100644 index 622e4eacc0..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410211 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410212 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410212 deleted file mode 100644 index 159d23bd74..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410212 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410213 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410213 deleted file mode 100644 index 0339effab3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410213 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410214 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410214 deleted file mode 100644 index 5cd5487c0f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410214 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410215 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410215 deleted file mode 100644 index 3db4b13f69..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410215 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410216 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410216 deleted file mode 100644 index e699e0ff17..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410216 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410217 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410217 deleted file mode 100644 index a0e424ac4d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410217 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410218 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410218 deleted file mode 100644 index 9ace288604..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410218 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410219 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410219 deleted file mode 100644 index 19b9595b10..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410219 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410220 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410220 deleted file mode 100644 index 1d2bdc322d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410220 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410221 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410221 deleted file mode 100644 index 48fb5a288f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410221 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410222 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410222 deleted file mode 100644 index 11bb83bf51..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410222 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410223 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410223 deleted file mode 100644 index f9b195e352..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410223 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410224 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410224 deleted file mode 100644 index 72fd61b53c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410224 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410225 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410225 deleted file mode 100644 index bd6e098075..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410225 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410226 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410226 deleted file mode 100644 index b9d6071107..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410226 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410227 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410227 deleted file mode 100644 index 8a5b0da8c9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410227 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410228 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410228 deleted file mode 100644 index 1be6873134..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410228 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410229 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410229 deleted file mode 100644 index a46124cc12..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410229 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410230 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410230 deleted file mode 100644 index 4a9b313035..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410230 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410231 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410231 deleted file mode 100644 index ea5db50f21..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410231 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410232 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410232 deleted file mode 100644 index d4df3ec804..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410232 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410233 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410233 deleted file mode 100644 index d1b03b6d09..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410233 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410234 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410234 deleted file mode 100644 index f72536d0d6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410234 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410235 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410235 deleted file mode 100644 index 33a57241a9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410235 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410236 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410236 deleted file mode 100644 index fd88249d7b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410236 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410237 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410237 deleted file mode 100644 index 5c139b5cce..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410237 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410238 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410238 deleted file mode 100644 index 93c5b7b42f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410238 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410239 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410239 deleted file mode 100644 index bb3c5e52a4..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410239 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410240 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410240 deleted file mode 100644 index 67d003eda3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410240 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410241 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410241 deleted file mode 100644 index c18f0331f7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410241 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410242 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410242 deleted file mode 100644 index 340c16c7ba..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410242 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410243 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410243 deleted file mode 100644 index d224ba2805..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410243 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410244 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410244 deleted file mode 100644 index 9699ceb49e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410244 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410245 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410245 deleted file mode 100644 index c05484d31a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410245 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410246 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410246 deleted file mode 100644 index 2e785b8219..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410246 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410247 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410247 deleted file mode 100644 index 2069d93da6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410247 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410248 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410248 deleted file mode 100644 index f074d88542..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410248 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410249 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410249 deleted file mode 100644 index a96bed5f94..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410249 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410250 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410250 deleted file mode 100644 index 21a3793884..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410250 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410251 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410251 deleted file mode 100644 index 3b0f0f86fc..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410251 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410252 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410252 deleted file mode 100644 index befb892430..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410252 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410253 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410253 deleted file mode 100644 index dfb813015c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410253 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410254 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410254 deleted file mode 100644 index 5338e11b07..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410254 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410255 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410255 deleted file mode 100644 index b089316285..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410255 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410256 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410256 deleted file mode 100644 index 0f076b8752..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410256 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410257 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410257 deleted file mode 100644 index 6ae2724bdc..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410257 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410258 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410258 deleted file mode 100644 index 75f90f17a7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410258 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410259 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410259 deleted file mode 100644 index b818822b59..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410259 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410260 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410260 deleted file mode 100644 index d04eb76a42..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410260 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410261 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410261 deleted file mode 100644 index b118f78d07..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410261 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410262 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410262 deleted file mode 100644 index 5a7451e7cb..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410262 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410263 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410263 deleted file mode 100644 index 8effaa7aec..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410263 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410264 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410264 deleted file mode 100644 index b1e18e3af2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410264 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410265 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410265 deleted file mode 100644 index 50350cf53e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410265 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410266 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410266 deleted file mode 100644 index 9be54fdc2e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410266 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410267 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410267 deleted file mode 100644 index cef2197e50..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410267 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410268 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410268 deleted file mode 100644 index 982e8fdee6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410268 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410269 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410269 deleted file mode 100644 index 194edc9193..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410269 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410270 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410270 deleted file mode 100644 index 5ab3583ffe..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410270 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410271 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410271 deleted file mode 100644 index 5d755fd90e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410271 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410272 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410272 deleted file mode 100644 index 9b5a3403bd..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410272 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410273 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410273 deleted file mode 100644 index 9271022802..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410273 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410274 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410274 deleted file mode 100644 index 2a6cb0b531..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410274 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410275 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410275 deleted file mode 100644 index 5799bbe692..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410275 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410276 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410276 deleted file mode 100644 index d1211ac0b1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410276 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410277 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410277 deleted file mode 100644 index bb276412f6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410277 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410278 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410278 deleted file mode 100644 index ef1007361e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410278 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410279 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410279 deleted file mode 100644 index da3e54d5b6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410279 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410280 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410280 deleted file mode 100644 index d7f998a6c4..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410280 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410281 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410281 deleted file mode 100644 index 02192f6890..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410281 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410282 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410282 deleted file mode 100644 index 8fe66297d1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410282 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410283 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410283 deleted file mode 100644 index 446be0bd36..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410283 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410284 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410284 deleted file mode 100644 index 96ae96bfd6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410284 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410285 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410285 deleted file mode 100644 index d63e8309f7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410285 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410286 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410286 deleted file mode 100644 index 3654eb085b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410286 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410287 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410287 deleted file mode 100644 index b4b099a2e3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410287 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410288 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410288 deleted file mode 100644 index c334b46a8f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410288 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410289 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410289 deleted file mode 100644 index 512638eb61..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410289 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410290 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410290 deleted file mode 100644 index 57f569dcde..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410290 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410291 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410291 deleted file mode 100644 index f0233c2ee2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410291 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410292 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410292 deleted file mode 100644 index b7e3e9ee0c..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410292 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410293 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410293 deleted file mode 100644 index 4199d3b68a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410293 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410294 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410294 deleted file mode 100644 index 7bf15567d2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410294 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410295 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410295 deleted file mode 100644 index c2a15d1d09..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410295 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410296 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410296 deleted file mode 100644 index 27a85f1ee1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410296 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410297 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410297 deleted file mode 100644 index 565aee38a3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410297 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410298 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410298 deleted file mode 100644 index 717df905de..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410298 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410299 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410299 deleted file mode 100644 index 3b297d0996..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410299 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410300 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410300 deleted file mode 100644 index e8c2d1b842..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410300 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410301 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410301 deleted file mode 100644 index 55af753079..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410301 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410302 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410302 deleted file mode 100644 index 613a7cefe9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410302 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410303 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410303 deleted file mode 100644 index 4e4b63d9f2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410303 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410304 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410304 deleted file mode 100644 index 1c8773292b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410304 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410305 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410305 deleted file mode 100644 index 01f0aa7bed..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410305 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410306 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410306 deleted file mode 100644 index 94cfc57359..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410306 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410307 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410307 deleted file mode 100644 index 28c6d05d2a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410307 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410308 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410308 deleted file mode 100644 index fa4d61c5f1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410308 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410309 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410309 deleted file mode 100644 index 884a3c530b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410309 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410310 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410310 deleted file mode 100644 index 13a07cfb7d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410310 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410311 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410311 deleted file mode 100644 index dc7027f06f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410311 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410312 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410312 deleted file mode 100644 index c56b241904..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410312 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410313 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410313 deleted file mode 100644 index 2f97426227..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410313 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410314 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410314 deleted file mode 100644 index b8f7486ae9..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410314 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410315 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410315 deleted file mode 100644 index 166968ec0d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410315 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410316 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410316 deleted file mode 100644 index 7f3d476345..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410316 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410317 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410317 deleted file mode 100644 index 84053d72ed..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410317 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410318 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410318 deleted file mode 100644 index d51a3f6468..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410318 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410319 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410319 deleted file mode 100644 index 7b565d5ec6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410319 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410320 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410320 deleted file mode 100644 index dd8d016d94..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410320 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410321 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410321 deleted file mode 100644 index 9be013ba34..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410321 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410322 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410322 deleted file mode 100644 index a27642a436..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410322 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410323 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410323 deleted file mode 100644 index 4854b00f41..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410323 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410324 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410324 deleted file mode 100644 index ca87e679af..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410324 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410325 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410325 deleted file mode 100644 index 26e0997a8d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410325 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410326 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410326 deleted file mode 100644 index a09965dc8d..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410326 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410327 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410327 deleted file mode 100644 index 9f1ccd3ec3..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410327 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410328 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410328 deleted file mode 100644 index 6a0c331690..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410328 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410329 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410329 deleted file mode 100644 index 5a9e0f15d5..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410329 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410330 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410330 deleted file mode 100644 index 6067e4e4f2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410330 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410331 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410331 deleted file mode 100644 index 2dd6f71e88..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410331 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410332 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410332 deleted file mode 100644 index 72f8c3f65f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410332 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410333 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410333 deleted file mode 100644 index 96e0cb0c76..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410333 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410334 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410334 deleted file mode 100644 index cb69dbd7c2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410334 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410335 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410335 deleted file mode 100644 index 5df72efbf5..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410335 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410336 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410336 deleted file mode 100644 index eea0a670f4..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410336 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410337 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410337 deleted file mode 100644 index f40e883fc5..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410337 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410338 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410338 deleted file mode 100644 index 8fc4d52e77..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410338 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410339 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410339 deleted file mode 100644 index 334ae0a835..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410339 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410340 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410340 deleted file mode 100644 index fe15d2001e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410340 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410341 
b/exp/lighthorizon/index/cmd/testdata/ledgers/1410341 deleted file mode 100644 index 50c32dbbce..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410341 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410342 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410342 deleted file mode 100644 index 8ad22ad1f3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410342 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410343 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410343 deleted file mode 100644 index 453801501f..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410343 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410344 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410344 deleted file mode 100644 index 70678ad6a7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410344 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410345 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410345 deleted file mode 100644 index e2533afc30..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410345 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410346 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410346 deleted file mode 100644 index 559106ba2b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410346 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410347 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410347 deleted file mode 100644 index 8d9b81b6f8..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410347 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410348 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410348 deleted file mode 100644 index 98c5b3fe96..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410348 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410349 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410349 deleted file mode 100644 index 26069739eb..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410349 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410350 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410350 deleted file mode 100644 index 0a8b820ef0..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410350 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410351 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410351 deleted file mode 100644 index b0d62a1c73..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410351 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410352 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410352 deleted file mode 100644 index 3a972c16b1..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410352 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410353 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410353 deleted file mode 100644 index 5fc02221c3..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410353 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410354 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410354 deleted file mode 100644 index 942e9e3aa1..0000000000 Binary files 
a/exp/lighthorizon/index/cmd/testdata/ledgers/1410354 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410355 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410355 deleted file mode 100644 index 09230a1709..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410355 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410356 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410356 deleted file mode 100644 index da71429d50..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410356 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410357 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410357 deleted file mode 100644 index 6a628ab91b..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410357 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410358 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410358 deleted file mode 100644 index 997bc24237..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410358 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410359 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410359 deleted file mode 100644 index b40c27aa4a..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410359 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410360 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410360 deleted file mode 100644 index 1e8d8ec2a2..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410360 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410361 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410361 deleted file mode 100644 index 198df6f33e..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410361 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410362 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410362 deleted file mode 100644 index ed256c96db..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410362 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410363 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410363 deleted file mode 100644 index 839c354691..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410363 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410364 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410364 deleted file mode 100644 index c54cfb52da..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410364 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410365 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410365 deleted file mode 100644 index e989965ff7..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410365 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410366 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410366 deleted file mode 100644 index 9a1eb601e6..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410366 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/ledgers/1410367 b/exp/lighthorizon/index/cmd/testdata/ledgers/1410367 deleted file mode 100644 index 68ee1497fa..0000000000 Binary files a/exp/lighthorizon/index/cmd/testdata/ledgers/1410367 and /dev/null differ diff --git a/exp/lighthorizon/index/cmd/testdata/regenerate.sh 
b/exp/lighthorizon/index/cmd/testdata/regenerate.sh deleted file mode 100644 index 4cc0d9c50f..0000000000 --- a/exp/lighthorizon/index/cmd/testdata/regenerate.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run -e WRITE_LATEST_PATH=true -e START=1410048 -e END=1410367 -e ARCHIVE_TARGET=file:///testdata/ -v $PWD:/testdata/ stellar/horizon-ledgerexporter:latest diff --git a/exp/lighthorizon/index/connect.go b/exp/lighthorizon/index/connect.go deleted file mode 100644 index b0787b4a77..0000000000 --- a/exp/lighthorizon/index/connect.go +++ /dev/null @@ -1,68 +0,0 @@ -package index - -import ( - "fmt" - "net/url" - "path/filepath" - - "github.com/aws/aws-sdk-go/aws" - backend "github.com/stellar/go/exp/lighthorizon/index/backend" -) - -func Connect(backendUrl string) (Store, error) { - return ConnectWithConfig(StoreConfig{URL: backendUrl}) - -} - -func ConnectWithConfig(config StoreConfig) (Store, error) { - if config.Workers <= 0 { - config.Workers = 1 - } - - parsed, err := url.Parse(config.URL) - if err != nil { - return nil, err - } - switch parsed.Scheme { - case "s3": - s3Url := fmt.Sprintf("%s/%s", config.URL, config.URLSubPath) - parsed, err = url.Parse(s3Url) - if err != nil { - return nil, err - } - awsConfig := &aws.Config{} - query := parsed.Query() - if region := query.Get("region"); region != "" { - awsConfig.Region = aws.String(region) - } - - return NewS3Store(awsConfig, parsed.Host, parsed.Path, config) - - case "file": - fileUrl := filepath.Join(config.URL, config.URLSubPath) - parsed, err = url.Parse(fileUrl) - if err != nil { - return nil, err - } - return NewFileStore(filepath.Join(parsed.Host, parsed.Path), config) - - default: - return nil, fmt.Errorf("unknown URL scheme: '%s' (from %s)", - parsed.Scheme, config.URL) - } -} - -func NewFileStore(prefix string, config StoreConfig) (Store, error) { - backend, err := backend.NewFileBackend(prefix, config.Workers) - if err != nil { - return nil, err - } - return NewStore(backend, config) -} - -func NewS3Store(awsConfig *aws.Config, bucket string, prefix string, indexConfig StoreConfig) (Store, error) { - backend, err := backend.NewS3Backend(awsConfig, bucket, prefix, indexConfig.Workers) - if err != nil { - return nil, err - } - return NewStore(backend, indexConfig) -} diff --git a/exp/lighthorizon/index/mock_store.go b/exp/lighthorizon/index/mock_store.go deleted file mode 100644 index db0e53e1cc..0000000000 --- a/exp/lighthorizon/index/mock_store.go +++ /dev/null @@ -1,78 +0,0 @@ -package index - -import ( - "github.com/prometheus/client_golang/prometheus" - types "github.com/stellar/go/exp/lighthorizon/index/types" - "github.com/stretchr/testify/mock" -) - -type MockStore struct { - mock.Mock -} - -func (m *MockStore) NextActive(account, index string, afterCheckpoint uint32) (uint32, error) { - args := m.Called(account, index, afterCheckpoint) - return args.Get(0).(uint32), args.Error(1) -} - -func (m *MockStore) TransactionTOID(hash [32]byte) (int64, error) { - args := m.Called(hash) - return args.Get(0).(int64), args.Error(1) -} - -func (m *MockStore) AddTransactionToIndexes(txnTOID int64, hash [32]byte) error { - args := m.Called(txnTOID, hash) - return args.Error(0) -} - -func (m *MockStore) AddParticipantsToIndexes(checkpoint uint32, index string, participants []string) error { - args := m.Called(checkpoint, index, participants) - return args.Error(0) -} - -func (m *MockStore) AddParticipantsToIndexesNoBackend(checkpoint uint32, index string, participants []string) error { - args := m.Called(checkpoint, index,
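For context on the deleted connect.go: Connect dispatches on the URL scheme ("s3" or "file") to choose an index backend. A minimal sketch of how a caller might have used it before this removal; the local path is an illustrative assumption, not part of this patch:

    package main

    import (
        "log"

        "github.com/stellar/go/exp/lighthorizon/index"
    )

    func main() {
        // A "file" URL routes to NewFileStore via NewFileBackend; an "s3"
        // URL would route to NewS3Store. Workers defaults to 1 when unset.
        store, err := index.Connect("file:///tmp/indexes")
        if err != nil {
            log.Fatal(err)
        }
        _ = store // query with NextActive, TransactionTOID, etc.
    }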
participants) - return args.Error(0) -} - -func (m *MockStore) AddParticipantToIndexesNoBackend(participant string, indexes types.NamedIndices) { - m.Called(participant, indexes) -} - -func (m *MockStore) Flush() error { - args := m.Called() - return args.Error(0) -} - -func (m *MockStore) FlushAccounts() error { - args := m.Called() - return args.Error(0) -} - -func (m *MockStore) ClearMemory(arg bool) { - m.Called(arg) -} - -func (m *MockStore) Read(account string) (types.NamedIndices, error) { - args := m.Called(account) - return args.Get(0).(types.NamedIndices), args.Error(1) -} - -func (m *MockStore) ReadAccounts() ([]string, error) { - args := m.Called() - return args.Get(0).([]string), args.Error(1) -} - -func (m *MockStore) ReadTransactions(prefix string) (*types.TrieIndex, error) { - args := m.Called(prefix) - return args.Get(0).(*types.TrieIndex), args.Error(1) -} - -func (m *MockStore) MergeTransactions(prefix string, other *types.TrieIndex) error { - args := m.Called(prefix, other) - return args.Error(0) -} - -func (m *MockStore) RegisterMetrics(registry *prometheus.Registry) { - m.Called(registry) -} diff --git a/exp/lighthorizon/index/modules.go b/exp/lighthorizon/index/modules.go deleted file mode 100644 index 8af7635ef0..0000000000 --- a/exp/lighthorizon/index/modules.go +++ /dev/null @@ -1,314 +0,0 @@ -package index - -import ( - "fmt" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/ingest" - "github.com/stellar/go/toid" - "github.com/stellar/go/xdr" -) - -var ( - checkpointManager = historyarchive.NewCheckpointManager(0) -) - -const ( - ByCheckpoint = iota - ByLedger = iota -) - -type AccountIndexMode int - -func ProcessTransaction( - indexStore Store, - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, -) error { - return indexStore.AddTransactionToIndexes( - toid.New(int32(ledger.LedgerSequence()), int32(tx.Index), 0).ToInt64(), - tx.Result.TransactionHash, - ) -} - -func ProcessAccountsByCheckpoint( - indexStore Store, - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, -) error { - return ProcessAccounts(indexStore, ledger, tx, ByCheckpoint) -} - -func ProcessAccountsByLedger( - indexStore Store, - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, -) error { - return ProcessAccounts(indexStore, ledger, tx, ByLedger) -} - -func ProcessAccounts( - indexStore Store, - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, - mode AccountIndexMode, -) error { - index := getIndex(ledger, mode) - if index == 0 { - return fmt.Errorf("Invalid account indexing mode: %d", mode) - } - - allParticipants, err := GetTransactionParticipants(tx) - if err != nil { - return err - } - - err = indexStore.AddParticipantsToIndexes(index, "all/all", allParticipants) - if err != nil { - return err - } - - paymentsParticipants, err := GetPaymentParticipants(tx) - if err != nil { - return err - } - - err = indexStore.AddParticipantsToIndexes(index, "all/payments", paymentsParticipants) - if err != nil { - return err - } - - return nil -} - -func ProcessAccountsByCheckpointWithoutBackend( - indexStore Store, - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, -) error { - return ProcessAccountsWithoutBackend(indexStore, ledger, tx, ByCheckpoint) -} - -func ProcessAccountsByLedgerWithoutBackend( - indexStore Store, - ledger xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, -) error { - return ProcessAccountsWithoutBackend(indexStore, ledger, tx, ByLedger) - -} - -func ProcessAccountsWithoutBackend( - indexStore Store, - ledger 
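The deleted mock_store.go follows standard testify conventions, so a consumer's test could have stubbed the store roughly as below; the test name, account string, and checkpoint values are illustrative assumptions:

    package index

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNextActiveStub(t *testing.T) {
        mockStore := &MockStore{}
        // Expect one lookup in the "all/all" index after checkpoint 100 and
        // report the account as next active at checkpoint 123.
        mockStore.On("NextActive", "GEXAMPLE", "all/all", uint32(100)).
            Return(uint32(123), nil)

        next, err := mockStore.NextActive("GEXAMPLE", "all/all", 100)
        assert.NoError(t, err)
        assert.EqualValues(t, 123, next)
        mockStore.AssertExpectations(t)
    }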
xdr.LedgerCloseMeta, - tx ingest.LedgerTransaction, - mode AccountIndexMode, -) error { - index := getIndex(ledger, mode) - if index == 0 { - return fmt.Errorf("Invalid account indexing mode: %d", mode) - } - - allParticipants, err := GetTransactionParticipants(tx) - if err != nil { - return err - } - - err = indexStore.AddParticipantsToIndexesNoBackend(index, "all/all", allParticipants) - if err != nil { - return err - } - - paymentsParticipants, err := GetPaymentParticipants(tx) - if err != nil { - return err - } - - err = indexStore.AddParticipantsToIndexesNoBackend(index, "all/payments", paymentsParticipants) - if err != nil { - return err - } - - return nil -} - -// GetCheckpointNumber returns the next checkpoint NUMBER (NOT the checkpoint -// ledger sequence) corresponding to a given ledger sequence. -func GetCheckpointNumber(ledger uint32) uint32 { - return 1 + (ledger / checkpointManager.GetCheckpointFrequency()) -} - -func GetPaymentParticipants(transaction ingest.LedgerTransaction) ([]string, error) { - return participantsForOperations(transaction, true) -} - -func GetTransactionParticipants(transaction ingest.LedgerTransaction) ([]string, error) { - return participantsForOperations(transaction, false) -} - -// transaction - the ledger transaction -// operation - the operation within this transaction -// opIndex - the 0 based index of the operation within the transaction -func GetOperationParticipants(transaction ingest.LedgerTransaction, operation xdr.Operation, opIndex int) ([]string, error) { - return participantsForOperation(transaction, operation, opIndex, false) -} - -func participantsForOperations(transaction ingest.LedgerTransaction, onlyPayments bool) ([]string, error) { - var participants []string - - for opindex, operation := range transaction.Envelope.Operations() { - opParticipants, err := participantsForOperation(transaction, operation, opindex, onlyPayments) - if err != nil { - return []string{}, err - } - participants = append(participants, opParticipants...) - } - - // FIXME: Can/Should we make this a set? It may mean less superfluous - // insertions into the index if there's a lot of activity by this - // account in this transaction. 
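As a worked example of the deleted GetCheckpointNumber, assuming the default history-archive checkpoint frequency of 64 ledgers: the testdata range 1410048-1410367 above covers exactly 320 ledgers, i.e. checkpoint numbers 22033 through 22037, since 1410048/64 = 22032 and the function adds one. A self-contained sketch:

    package main

    import "fmt"

    // checkpointNumber mirrors the deleted GetCheckpointNumber: it returns
    // the checkpoint NUMBER, not the checkpoint ledger sequence.
    func checkpointNumber(ledger, frequency uint32) uint32 {
        return 1 + ledger/frequency
    }

    func main() {
        fmt.Println(checkpointNumber(1410048, 64)) // 22033
        fmt.Println(checkpointNumber(1410367, 64)) // 22037
    }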
- return participants, nil -} - -// transaction - the ledger transaction -// operation - the operation within this transaction -// opIndex - the 0 based index of the operation within the transaction -func participantsForOperation(transaction ingest.LedgerTransaction, operation xdr.Operation, opIndex int, onlyPayments bool) ([]string, error) { - participants := []string{} - opSource := operation.SourceAccount - if opSource == nil { - txSource := transaction.Envelope.SourceAccount() - opSource = &txSource - } - switch operation.Body.Type { - case xdr.OperationTypeCreateAccount, - xdr.OperationTypePayment, - xdr.OperationTypePathPaymentStrictReceive, - xdr.OperationTypePathPaymentStrictSend, - xdr.OperationTypeAccountMerge: - participants = append(participants, opSource.Address()) - - default: - if onlyPayments { - return participants, nil - } - participants = append(participants, opSource.Address()) - } - - switch operation.Body.Type { - case xdr.OperationTypeCreateAccount: - participants = append(participants, operation.Body.MustCreateAccountOp().Destination.Address()) - - case xdr.OperationTypePayment: - participants = append(participants, operation.Body.MustPaymentOp().Destination.ToAccountId().Address()) - - case xdr.OperationTypePathPaymentStrictReceive: - participants = append(participants, operation.Body.MustPathPaymentStrictReceiveOp().Destination.ToAccountId().Address()) - - case xdr.OperationTypePathPaymentStrictSend: - participants = append(participants, operation.Body.MustPathPaymentStrictSendOp().Destination.ToAccountId().Address()) - - case xdr.OperationTypeAllowTrust: - participants = append(participants, operation.Body.MustAllowTrustOp().Trustor.Address()) - - case xdr.OperationTypeAccountMerge: - participants = append(participants, operation.Body.MustDestination().ToAccountId().Address()) - - case xdr.OperationTypeCreateClaimableBalance: - for _, c := range operation.Body.MustCreateClaimableBalanceOp().Claimants { - participants = append(participants, c.MustV0().Destination.Address()) - } - - case xdr.OperationTypeBeginSponsoringFutureReserves: - participants = append(participants, operation.Body.MustBeginSponsoringFutureReservesOp().SponsoredId.Address()) - - case xdr.OperationTypeEndSponsoringFutureReserves: - // Failed transactions may not have a compliant sandwich structure - // we can rely on (e.g. invalid nesting or a begin operation with - // the wrong sponsoree ID) and thus we bail out since we could - // return incorrect information. - if transaction.Result.Successful() { - sponsoree := transaction.Envelope.SourceAccount().ToAccountId().Address() - if operation.SourceAccount != nil { - sponsoree = operation.SourceAccount.Address() - } - operations := transaction.Envelope.Operations() - for i := opIndex - 1; i >= 0; i-- { - if beginOp, ok := operations[i].Body.GetBeginSponsoringFutureReservesOp(); ok && - beginOp.SponsoredId.Address() == sponsoree { - participants = append(participants, beginOp.SponsoredId.Address()) - } - } - } - - case xdr.OperationTypeRevokeSponsorship: - op := operation.Body.MustRevokeSponsorshipOp() - switch op.Type { - case xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: - participants = append(participants, getLedgerKeyParticipants(*op.LedgerKey)...) - - case xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner: - participants = append(participants, op.Signer.AccountId.Address()) - // We don't add signer as a participant because a signer can be - // an arbitrary account. This can spam successful operations - // history of any account.
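To make the sponsorship "sandwich" scan above concrete: for a successful transaction shaped like [begin(sponsoree), ..., end], the loop walks backwards from the end operation and credits the matching sponsoree as a participant. A simplified, standalone sketch of that matching logic (types and names are illustrative, not from this patch):

    package main

    import "fmt"

    type op struct {
        isBegin     bool
        sponsoredID string
    }

    // sponsorshipParticipants mirrors the backward scan: starting from the
    // end op at opIndex, find begin ops whose sponsored ID matches the
    // sponsoree and report them as participants.
    func sponsorshipParticipants(ops []op, opIndex int, sponsoree string) []string {
        var participants []string
        for i := opIndex - 1; i >= 0; i-- {
            if ops[i].isBegin && ops[i].sponsoredID == sponsoree {
                participants = append(participants, ops[i].sponsoredID)
            }
        }
        return participants
    }

    func main() {
        ops := []op{{isBegin: true, sponsoredID: "GSPONSOREE"}, {}, {}}
        fmt.Println(sponsorshipParticipants(ops, 2, "GSPONSOREE")) // [GSPONSOREE]
    }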
- } - - case xdr.OperationTypeClawback: - op := operation.Body.MustClawbackOp() - participants = append(participants, op.From.ToAccountId().Address()) - - case xdr.OperationTypeSetTrustLineFlags: - op := operation.Body.MustSetTrustLineFlagsOp() - participants = append(participants, op.Trustor.Address()) - - // for the following, the only direct participant is the source_account - case xdr.OperationTypeManageBuyOffer: - case xdr.OperationTypeManageSellOffer: - case xdr.OperationTypeCreatePassiveSellOffer: - case xdr.OperationTypeSetOptions: - case xdr.OperationTypeChangeTrust: - case xdr.OperationTypeInflation: - case xdr.OperationTypeManageData: - case xdr.OperationTypeBumpSequence: - case xdr.OperationTypeClaimClaimableBalance: - case xdr.OperationTypeClawbackClaimableBalance: - case xdr.OperationTypeLiquidityPoolDeposit: - case xdr.OperationTypeLiquidityPoolWithdraw: - - default: - return nil, fmt.Errorf("unknown operation type: %s", operation.Body.Type) - } - return participants, nil -} - -// getLedgerKeyParticipants returns a list of accounts that are considered -// "participants" in a particular ledger entry. -// -// This list will have zero or one element, making it easy to expand via `...`. -func getLedgerKeyParticipants(ledgerKey xdr.LedgerKey) []string { - switch ledgerKey.Type { - case xdr.LedgerEntryTypeAccount: - return []string{ledgerKey.Account.AccountId.Address()} - case xdr.LedgerEntryTypeData: - return []string{ledgerKey.Data.AccountId.Address()} - case xdr.LedgerEntryTypeOffer: - return []string{ledgerKey.Offer.SellerId.Address()} - case xdr.LedgerEntryTypeTrustline: - return []string{ledgerKey.TrustLine.AccountId.Address()} - case xdr.LedgerEntryTypeClaimableBalance: - // nothing to do - } - return []string{} -} - -func getIndex(ledger xdr.LedgerCloseMeta, mode AccountIndexMode) uint32 { - switch mode { - case ByCheckpoint: - return GetCheckpointNumber(ledger.LedgerSequence()) - case ByLedger: - return ledger.LedgerSequence() - default: - return 0 - } -} diff --git a/exp/lighthorizon/index/store.go b/exp/lighthorizon/index/store.go deleted file mode 100644 index de5f4f6f07..0000000000 --- a/exp/lighthorizon/index/store.go +++ /dev/null @@ -1,377 +0,0 @@ -package index - -import ( - "encoding/binary" - "encoding/hex" - "io" - "os" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - backend "github.com/stellar/go/exp/lighthorizon/index/backend" - types "github.com/stellar/go/exp/lighthorizon/index/types" - "github.com/stellar/go/support/log" -) - -type Store interface { - NextActive(account, index string, afterCheckpoint uint32) (uint32, error) - TransactionTOID(hash [32]byte) (int64, error) - - AddTransactionToIndexes(txnTOID int64, hash [32]byte) error - AddParticipantsToIndexes(checkpoint uint32, index string, participants []string) error - AddParticipantsToIndexesNoBackend(checkpoint uint32, index string, participants []string) error - AddParticipantToIndexesNoBackend(participant string, indexes types.NamedIndices) - - Flush() error - FlushAccounts() error - ClearMemory(bool) - - Read(account string) (types.NamedIndices, error) - ReadAccounts() ([]string, error) - ReadTransactions(prefix string) (*types.TrieIndex, error) - - MergeTransactions(prefix string, other *types.TrieIndex) error - - RegisterMetrics(registry *prometheus.Registry) -} - -type StoreConfig struct { - // init time config - // the base url for the store resource - URL string - // optional url path to append to the base url to realize the complete url - URLSubPath string - Workers 
uint32 - - // runtime config - ClearMemoryOnFlush bool - - // logging & metrics - Log *log.Entry // TODO: unused for now - Metrics *prometheus.Registry -} - -type store struct { - mutex sync.RWMutex - config StoreConfig - - // data - indexes map[string]types.NamedIndices - txIndexes map[string]*types.TrieIndex - backend backend.Backend - - // metrics - indexWorkingSet prometheus.Gauge - indexWorkingSetTime prometheus.Gauge // to check whether computing the above takes too long -} - -func NewStore(backend backend.Backend, config StoreConfig) (Store, error) { - result := &store{ - indexes: map[string]types.NamedIndices{}, - txIndexes: map[string]*types.TrieIndex{}, - backend: backend, - - config: config, - - indexWorkingSet: newHorizonLiteGauge("working_set", - "Approximately how much memory (kiB) are indices using?"), - indexWorkingSetTime: newHorizonLiteGauge("working_set_time", - "How long did it take (μs) to calculate the working set size?"), - } - result.RegisterMetrics(config.Metrics) - - return result, nil -} - -func (s *store) accounts() []string { - accounts := make([]string, 0, len(s.indexes)) - for account := range s.indexes { - accounts = append(accounts, account) - } - return accounts -} - -func (s *store) FlushAccounts() error { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.backend.FlushAccounts(s.accounts()) -} - -func (s *store) Read(account string) (types.NamedIndices, error) { - return s.backend.Read(account) -} - -func (s *store) ReadAccounts() ([]string, error) { - return s.backend.ReadAccounts() -} - -func (s *store) ReadTransactions(prefix string) (*types.TrieIndex, error) { - return s.getCreateTrieIndex(prefix) -} - -func (s *store) MergeTransactions(prefix string, other *types.TrieIndex) error { - defer s.approximateWorkingSet() - - index, err := s.getCreateTrieIndex(prefix) - if err != nil { - return err - } - if err := index.Merge(other); err != nil { - return err - } - - s.mutex.Lock() - defer s.mutex.Unlock() - s.txIndexes[prefix] = index - return nil -} - -func (s *store) approximateWorkingSet() { - if s.config.Metrics == nil { - return - } - - start := time.Now() - approx := float64(0) - - for _, indices := range s.indexes { - firstIndexSize := 0 - for _, index := range indices { - firstIndexSize = index.Size() - break - } - - // There may be multiple indices for each account, but we can do a rough - // approximation for now by just assuming they're all around the same - // size. - approx += float64(len(indices) * firstIndexSize) - } - - for _, trie := range s.txIndexes { - // FIXME: Is this too slow? We probably want a TrieIndex.Size() method, - // but that's not trivial to determine for a trie.
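// Editor's note: an added sketch of what the FIXME above gestures at. Instead
// of walking the whole trie on every metrics pass, the store could keep a
// running byte count that the upsert path maintains. This assumes all
// mutations funnel through one code path; sizedTrie and recordUpsert are
// hypothetical names, not part of the original package.
type sizedTrie struct {
	approxBytes int64 // running sum of len(key)+len(value) over live entries
}

func (st *sizedTrie) recordUpsert(key, value, previous []byte, replaced bool) {
	if replaced {
		// The key was already counted; only the value size changed.
		st.approxBytes += int64(len(value)) - int64(len(previous))
	} else {
		st.approxBytes += int64(len(key) + len(value))
	}
}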
- trie.Iterate(func(key, value []byte) { - approx += float64(len(key) + len(value)) - }) - } - - s.indexWorkingSet.Set(approx / 1024) // kiB - s.indexWorkingSetTime.Set(float64(time.Since(start).Microseconds())) // μs -} - -func (s *store) Flush() error { - s.mutex.Lock() - defer s.mutex.Unlock() - defer s.approximateWorkingSet() - - if err := s.backend.Flush(s.indexes); err != nil { - return err - } - - if err := s.backend.FlushAccounts(s.accounts()); err != nil { - return err - } else if s.config.ClearMemoryOnFlush { - s.indexes = map[string]types.NamedIndices{} - } - - if err := s.backend.FlushTransactions(s.txIndexes); err != nil { - return err - } else if s.config.ClearMemoryOnFlush { - s.txIndexes = map[string]*types.TrieIndex{} - } - - return nil -} - -func (s *store) ClearMemory(doClear bool) { - s.config.ClearMemoryOnFlush = doClear -} - -func (s *store) AddTransactionToIndexes(txnTOID int64, hash [32]byte) error { - index, err := s.getCreateTrieIndex(hex.EncodeToString(hash[:1])) - if err != nil { - return err - } - - value := make([]byte, 8) - binary.BigEndian.PutUint64(value, uint64(txnTOID)) - - // We don't have to re-calculate the whole working set size for metrics - // since we're adding a known size. - if _, replaced := index.Upsert(hash[1:], value); !replaced { - s.indexWorkingSet.Add(float64(len(hash) - 1 + len(value))) - } - - return nil -} - -func (s *store) TransactionTOID(hash [32]byte) (int64, error) { - index, err := s.getCreateTrieIndex(hex.EncodeToString(hash[:1])) - if err != nil { - return 0, err - } - - value, ok := index.Get(hash[1:]) - if !ok { - return 0, io.EOF - } - return int64(binary.BigEndian.Uint64(value)), nil -} - -// AddParticipantsToIndexesNoBackend is a temporary version of -// AddParticipantsToIndexes that skips backend downloads; it is used in AWS -// Batch. Refactoring is required to make it better.
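// Editor's note: an added aside between the doc comment above and the function
// below. It is a round-trip sketch of the transaction-index encoding used by
// AddTransactionToIndexes and TransactionTOID: the first byte of the
// transaction hash (hex-encoded) selects a trie shard, the remaining 31 bytes
// form the key, and the value is the TOID as 8 big-endian bytes.
// exampleTOIDEncoding is a hypothetical helper using only packages this file
// already imports.
func exampleTOIDEncoding(hash [32]byte, toid int64) (string, []byte, int64) {
	shard := hex.EncodeToString(hash[:1]) // e.g. "a4": one of 256 shards
	value := make([]byte, 8)
	binary.BigEndian.PutUint64(value, uint64(toid))
	decoded := int64(binary.BigEndian.Uint64(value)) // round-trips to == toid
	return shard, hash[1:], decoded
}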
-func (s *store) AddParticipantsToIndexesNoBackend(checkpoint uint32, index string, participants []string) error { - s.mutex.Lock() - defer s.mutex.Unlock() - defer s.approximateWorkingSet() - - var err error - for _, participant := range participants { - if _, ok := s.indexes[participant]; !ok { - s.indexes[participant] = map[string]*types.BitmapIndex{} - } - - ind, ok := s.indexes[participant][index] - if !ok { - ind = &types.BitmapIndex{} - s.indexes[participant][index] = ind - } - - if innerErr := ind.SetActive(checkpoint); innerErr != nil { - err = innerErr - } - // don't break early, instead try to save as many participants as we can - } - - return err -} - -func (s *store) AddParticipantToIndexesNoBackend(participant string, indexes types.NamedIndices) { - s.mutex.Lock() - defer s.mutex.Unlock() - defer s.approximateWorkingSet() - - s.indexes[participant] = indexes -} - -func (s *store) AddParticipantsToIndexes(checkpoint uint32, index string, participants []string) error { - defer s.approximateWorkingSet() - - for _, participant := range participants { - ind, err := s.getCreateIndex(participant, index) - if err != nil { - return err - } - err = ind.SetActive(checkpoint) - if err != nil { - return err - } - } - return nil -} - -func (s *store) getCreateIndex(account, id string) (*types.BitmapIndex, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - defer s.approximateWorkingSet() - - // Check if we already have it loaded - accountIndexes, ok := s.indexes[account] - if !ok { - accountIndexes = types.NamedIndices{} - } - ind, ok := accountIndexes[id] - if ok { - return ind, nil - } - - // Check if index exists in backend - found, err := s.backend.Read(account) - if err == nil { - accountIndexes = found - } else if !os.IsNotExist(err) { - return nil, err - } - - ind, ok = accountIndexes[id] - if !ok { - // Not found anywhere, make a new one. - ind = &types.BitmapIndex{} - accountIndexes[id] = ind - } - - // We don't want to replace the entire index map in memory (even though we - // read all of it from disk), just the one we loaded from disk. Otherwise, - // we lose in-memory changes to unrelated indices. - if memoryIndices, ok := s.indexes[account]; ok { // account exists in-mem - if memoryIndex, ok2 := memoryIndices[id]; ok2 { // id exists in-mem - if memoryIndex != accountIndexes[id] { // not using in-mem already - memoryIndex.Merge(ind) - s.indexes[account][id] = memoryIndex - } - } - } else { - s.indexes[account] = accountIndexes - } - - return ind, nil -} - -func (s *store) NextActive(account, indexId string, afterCheckpoint uint32) (uint32, error) { - defer s.approximateWorkingSet() - - ind, err := s.getCreateIndex(account, indexId) - if err != nil { - return 0, err - } - return ind.NextActiveBit(afterCheckpoint) -} - -func (s *store) getCreateTrieIndex(prefix string) (*types.TrieIndex, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - defer s.approximateWorkingSet() - - // Check if we already have it loaded - index, ok := s.txIndexes[prefix] - if ok { - return index, nil - } - - // Check if index exists in backend - found, err := s.backend.ReadTransactions(prefix) - if err == nil { - s.txIndexes[prefix] = found - } else if !os.IsNotExist(err) { - return nil, err - } - - index, ok = s.txIndexes[prefix] - if !ok { - // Not found anywhere, make a new one. 
- index = &types.TrieIndex{} - s.txIndexes[prefix] = index - } - - return index, nil -} - -func (s *store) RegisterMetrics(registry *prometheus.Registry) { - s.config.Metrics = registry - - if registry != nil { - registry.Register(s.indexWorkingSet) - registry.Register(s.indexWorkingSetTime) - } -} - -func newHorizonLiteGauge(name, help string) prometheus.Gauge { - return prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "horizon_lite", - Subsystem: "index_store", - Name: name, - Help: help, - }) -} diff --git a/exp/lighthorizon/index/types/bitmap.go b/exp/lighthorizon/index/types/bitmap.go deleted file mode 100644 index 171115938b..0000000000 --- a/exp/lighthorizon/index/types/bitmap.go +++ /dev/null @@ -1,367 +0,0 @@ -package index - -import ( - "bytes" - "fmt" - "io" - "strings" - "sync" - - "github.com/stellar/go/support/ordered" - "github.com/stellar/go/xdr" -) - -const BitmapIndexVersion = 1 - -type BitmapIndex struct { - mutex sync.RWMutex - bitmap []byte - firstBit uint32 - lastBit uint32 -} - -type NamedIndices map[string]*BitmapIndex - -func NewBitmapIndex(b []byte) (*BitmapIndex, error) { - xdrBitmap := xdr.BitmapIndex{} - err := xdrBitmap.UnmarshalBinary(b) - if err != nil { - return nil, err - } - - return NewBitmapIndexFromXDR(xdrBitmap), nil -} - -func NewBitmapIndexFromXDR(index xdr.BitmapIndex) *BitmapIndex { - return &BitmapIndex{ - bitmap: index.Bitmap[:], - firstBit: uint32(index.FirstBit), - lastBit: uint32(index.LastBit), - } -} - -func (i *BitmapIndex) Size() int { - return len(i.bitmap) -} - -func (i *BitmapIndex) SetActive(index uint32) error { - i.mutex.Lock() - defer i.mutex.Unlock() - return i.setActive(index) -} - -func (i *BitmapIndex) SetInactive(index uint32) error { - i.mutex.Lock() - defer i.mutex.Unlock() - return i.setInactive(index) -} - -// bitShiftLeft returns a byte with the bit set corresponding to the index. In -// other words, it flips the bit corresponding to the index's "position" mod-8. -func bitShiftLeft(index uint32) byte { - if index%8 == 0 { - return 1 - } else { - return byte(1) << (8 - index%8) - } -} - -// rangeFirstBit returns the index of the first *possible* active bit in the -// bitmap. In other words, if you just have SetActive(12), this will return 9, -// because you have one byte (0b0001_0000) and the *first* value the bitmap can -// represent is 9. -func (i *BitmapIndex) rangeFirstBit() uint32 { - return (i.firstBit-1)/8*8 + 1 -} - -// rangeLastBit returns the index of the last *possible* active bit in the -// bitmap. In other words, if you just have SetActive(12), this will return 16, -// because you have one byte (0b0001_0000) and the *last* value the bitmap can -// represent is 16. -func (i *BitmapIndex) rangeLastBit() uint32 { - return i.rangeFirstBit() + uint32(len(i.bitmap))*8 - 1 -} - -func (i *BitmapIndex) setActive(index uint32) error { - if i.firstBit == 0 { - i.firstBit = index - i.lastBit = index - b := bitShiftLeft(index) - i.bitmap = []byte{b} - } else { - if index >= i.rangeFirstBit() && index <= i.rangeLastBit() { - // Update the bit in existing range - b := bitShiftLeft(index) - loc := (index - i.rangeFirstBit()) / 8 - i.bitmap[loc] = i.bitmap[loc] | b - - if index < i.firstBit { - i.firstBit = index - } - if index > i.lastBit { - i.lastBit = index - } - } else { - // Expand the bitmap - if index < i.rangeFirstBit() { - // ...to the left - newBytes := make([]byte, distance(index, i.rangeFirstBit())) - i.bitmap = append(newBytes, i.bitmap...) 
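// Editor's note: an added aside in the middle of setActive. It is a worked
// example of the 1-based bit layout documented above for bitShiftLeft,
// rangeFirstBit, and rangeLastBit; exampleBitLayout is a hypothetical helper
// that calls only this file's own definitions plus fmt.
func exampleBitLayout() {
	i := &BitmapIndex{}
	i.SetActive(12)
	fmt.Printf("%08b [%d, %d]\n", i.bitmap[0], i.rangeFirstBit(), i.rangeLastBit())
	// Output: 00010000 [9, 16]
	// Bit 12 lands in the single byte that covers positions 9 through 16.
}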
- b := bitShiftLeft(index) - i.bitmap[0] = i.bitmap[0] | b - - i.firstBit = index - } else if index > i.rangeLastBit() { - // ... to the right - newBytes := make([]byte, distance(i.rangeLastBit(), index)) - i.bitmap = append(i.bitmap, newBytes...) - b := bitShiftLeft(index) - loc := (index - i.rangeFirstBit()) / 8 - i.bitmap[loc] = i.bitmap[loc] | b - - i.lastBit = index - } - } - } - - return nil -} - -func (i *BitmapIndex) setInactive(index uint32) error { - // Is this index even active in the first place? - if i.firstBit == 0 || index < i.rangeFirstBit() || index > i.rangeLastBit() { - return nil // not really an error - } - - loc := (index - i.rangeFirstBit()) / 8 // which byte? - b := bitShiftLeft(index) // which bit w/in the byte? - i.bitmap[loc] &= ^b // unset only that bit - - // If unsetting this bit made the first byte empty OR we unset the earliest - // set bit, we need to find the next "first" active bit. - if loc == 0 && i.firstBit == index { - // find the next active bit to set as the start - nextBit, err := i.nextActiveBit(index) - if err == io.EOF { - i.firstBit = 0 - i.lastBit = 0 - i.bitmap = []byte{} - } else if err != nil { - return err - } else { - // Trim all (now-)empty bytes off the front. - i.bitmap = i.bitmap[distance(i.firstBit, nextBit):] - i.firstBit = nextBit - } - } else if int(loc) == len(i.bitmap)-1 { - idx := -1 - - if i.bitmap[loc] == 0 { - // find the latest non-empty byte, to set as the new "end" - j := len(i.bitmap) - 1 - for i.bitmap[j] == 0 { - j-- - } - - i.bitmap = i.bitmap[:j+1] - idx = 8 - } else if i.lastBit == index { - // Get the "bit number" of the last active bit (i.e. the one we just - // turned off) to mark the starting point for the search. - idx = 8 - if index%8 != 0 { - idx = int(index % 8) - } - } - - // Do we need to adjust the range? Imagine we had 0b0011_0100 and we - // unset the last active bit. - // ^ - // Then, we need to adjust our internal lastBit tracker to represent the - // ^ bit above. This means finding the first previous set bit. - if idx > -1 { - l := uint32(len(i.bitmap) - 1) - j, ok := int(idx), false - for ; j >= 0 && !ok; j-- { - _, ok = maxBitAfter(i.bitmap[l], uint32(j)) - } - - // We know from the earlier conditional that *some* bit is set, so - // we know that j represents the index of the bit that's the new - // "last active" bit.
- firstByte := i.rangeFirstBit() - i.lastBit = firstByte + (l * 8) + uint32(j) + 1 - } - } - - return nil -} - -//lint:ignore U1000 Ignore unused function temporarily -func (i *BitmapIndex) isActive(index uint32) bool { - if index >= i.firstBit && index <= i.lastBit { - b := bitShiftLeft(index) - loc := (index - i.rangeFirstBit()) / 8 - return i.bitmap[loc]&b != 0 - } else { - return false - } -} - -func (i *BitmapIndex) iterate(f func(index uint32)) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if i.firstBit == 0 { - return nil - } - - f(i.firstBit) - curr := i.firstBit - - for { - var err error - curr, err = i.nextActiveBit(curr + 1) - if err != nil { - if err == io.EOF { - break - } - return err - } - - f(curr) - } - - return nil -} - -func (i *BitmapIndex) Merge(other *BitmapIndex) error { - i.mutex.Lock() - defer i.mutex.Unlock() - - var err error - other.iterate(func(index uint32) { - if err != nil { - return - } - err = i.setActive(index) - }) - - return err -} - -// NextActiveBit returns the next bit position (inclusive) where this index is -// active. "Inclusive" means that if it's already active at `position`, this -// returns `position`. -func (i *BitmapIndex) NextActiveBit(position uint32) (uint32, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - return i.nextActiveBit(position) -} - -func (i *BitmapIndex) nextActiveBit(position uint32) (uint32, error) { - if i.firstBit == 0 || position > i.lastBit { - // We're past the end. - // TODO: Should this be an error? or how should we signal NONE here? - return 0, io.EOF - } - - if position < i.firstBit { - position = i.firstBit - } - - // Must be within the range, find the first non-zero after our start - loc := (position - i.rangeFirstBit()) / 8 - - // Is it in the same byte? - if shift, ok := maxBitAfter(i.bitmap[loc], (position-1)%8); ok { - return i.rangeFirstBit() + (loc * 8) + shift, nil - } - - // Scan bytes after - loc++ - for ; loc < uint32(len(i.bitmap)); loc++ { - // Find the offset of the set bit - if shift, ok := maxBitAfter(i.bitmap[loc], 0); ok { - return i.rangeFirstBit() + (loc * 8) + shift, nil - } - } - - // all bits after this were zero - // TODO: Should this be an error? or how should we signal NONE here? - return 0, io.EOF -} - -func (i *BitmapIndex) ToXDR() xdr.BitmapIndex { - i.mutex.RLock() - defer i.mutex.RUnlock() - - return xdr.BitmapIndex{ - FirstBit: xdr.Uint32(i.firstBit), - LastBit: xdr.Uint32(i.lastBit), - Bitmap: i.bitmap, - } -} - -func (i *BitmapIndex) Buffer() *bytes.Buffer { - i.mutex.RLock() - defer i.mutex.RUnlock() - - xdrBitmap := i.ToXDR() - b, err := xdrBitmap.MarshalBinary() - if err != nil { - panic(err) - } - return bytes.NewBuffer(b) -} - -// Flush flushes the index data to byte slice in index format. -func (i *BitmapIndex) Flush() []byte { - return i.Buffer().Bytes() -} - -// DebugCompare returns a string that compares this bitmap to another bitmap -// byte-by-byte in binary form as two columns. 
-func (i *BitmapIndex) DebugCompare(j *BitmapIndex) string { - output := make([]string, ordered.Max(len(i.bitmap), len(j.bitmap))) - for n := 0; n < len(output); n++ { - if n < len(i.bitmap) { - output[n] += fmt.Sprintf("%08b", i.bitmap[n]) - } else { - output[n] += " " - } - - output[n] += " | " - - if n < len(j.bitmap) { - output[n] += fmt.Sprintf("%08b", j.bitmap[n]) - } - } - - return strings.Join(output, "\n") -} - -func maxBitAfter(b byte, after uint32) (uint32, bool) { - if b == 0 { - // empty byte - return 0, false - } - - for shift := uint32(after); shift < 8; shift++ { - mask := byte(0b1000_0000) >> shift - if mask&b != 0 { - return shift, true - } - } - return 0, false -} - -// distance returns how many bytes occur between the two given indices. Note -// that j >= i, otherwise the result will be negative. -func distance(i, j uint32) int { - return (int(j)-1)/8 - (int(i)-1)/8 -} diff --git a/exp/lighthorizon/index/types/bitmap_test.go b/exp/lighthorizon/index/types/bitmap_test.go deleted file mode 100644 index c5e7864872..0000000000 --- a/exp/lighthorizon/index/types/bitmap_test.go +++ /dev/null @@ -1,382 +0,0 @@ -package index - -import ( - "fmt" - "io" - "math/rand" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewFromBytes(t *testing.T) { - for i := uint32(1); i < 200; i++ { - t.Run(fmt.Sprintf("New%d", i), func(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(i) - b := index.Flush() - newIndex, err := NewBitmapIndex(b) - require.NoError(t, err) - assert.Equal(t, index.firstBit, newIndex.firstBit) - assert.Equal(t, index.lastBit, newIndex.lastBit) - assert.Equal(t, index.bitmap, newIndex.bitmap) - }) - } -} - -func TestSetActive(t *testing.T) { - cases := []struct { - checkpoint uint32 - rangeFirstCheckpoint uint32 - bitmap []byte - }{ - {1, 1, []byte{0b1000_0000}}, - {2, 1, []byte{0b0100_0000}}, - {3, 1, []byte{0b0010_0000}}, - {4, 1, []byte{0b0001_0000}}, - {5, 1, []byte{0b0000_1000}}, - {6, 1, []byte{0b0000_0100}}, - {7, 1, []byte{0b0000_0010}}, - {8, 1, []byte{0b0000_0001}}, - - {9, 9, []byte{0b1000_0000}}, - {10, 9, []byte{0b0100_0000}}, - {11, 9, []byte{0b0010_0000}}, - {12, 9, []byte{0b0001_0000}}, - {13, 9, []byte{0b0000_1000}}, - {14, 9, []byte{0b0000_0100}}, - {15, 9, []byte{0b0000_0010}}, - {16, 9, []byte{0b0000_0001}}, - } - - for _, tt := range cases { - t.Run(fmt.Sprintf("init_%d", tt.checkpoint), func(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(tt.checkpoint) - - assert.Equal(t, tt.bitmap, index.bitmap) - assert.Equal(t, tt.rangeFirstCheckpoint, index.rangeFirstBit()) - assert.Equal(t, tt.checkpoint, index.firstBit) - assert.Equal(t, tt.checkpoint, index.lastBit) - }) - } - - // Update current bitmap right - index := &BitmapIndex{} - index.SetActive(1) - assert.Equal(t, uint32(1), index.firstBit) - assert.Equal(t, uint32(1), index.lastBit) - index.SetActive(8) - assert.Equal(t, []byte{0b1000_0001}, index.bitmap) - assert.Equal(t, uint32(1), index.firstBit) - assert.Equal(t, uint32(8), index.lastBit) - - // Update current bitmap left - index = &BitmapIndex{} - index.SetActive(8) - assert.Equal(t, uint32(8), index.firstBit) - assert.Equal(t, uint32(8), index.lastBit) - index.SetActive(1) - assert.Equal(t, []byte{0b1000_0001}, index.bitmap) - assert.Equal(t, uint32(1), index.firstBit) - assert.Equal(t, uint32(8), index.lastBit) - - index = &BitmapIndex{} - index.SetActive(10) - index.SetActive(9) - index.SetActive(16) - assert.Equal(t, []byte{0b1100_0001}, index.bitmap) - 
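// Editor's note: an added sanity check for the distance() helper defined at
// the end of bitmap.go above. It counts whole bytes between two 1-based bit
// indices, so two indices within the same byte are 0 apart.
// TestDistanceExamples is a hypothetical test name, not from the original
// suite.
func TestDistanceExamples(t *testing.T) {
	assert.Equal(t, 0, distance(1, 8))  // both indices live in the first byte
	assert.Equal(t, 1, distance(8, 9))  // 8 is in byte 0, 9 starts byte 1
	assert.Equal(t, 2, distance(2, 17)) // byte 0 to byte 2
}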
assert.Equal(t, uint32(9), index.firstBit) - assert.Equal(t, uint32(16), index.lastBit) - - // Expand bitmap to the left - index = &BitmapIndex{} - index.SetActive(10) - index.SetActive(1) - assert.Equal(t, []byte{0b1000_0000, 0b0100_0000}, index.bitmap) - assert.Equal(t, uint32(1), index.firstBit) - assert.Equal(t, uint32(10), index.lastBit) - - index = &BitmapIndex{} - index.SetActive(17) - index.SetActive(2) - assert.Equal(t, []byte{0b0100_0000, 0b0000_0000, 0b1000_0000}, index.bitmap) - assert.Equal(t, uint32(2), index.firstBit) - assert.Equal(t, uint32(17), index.lastBit) - - // Expand bitmap to the right - index = &BitmapIndex{} - index.SetActive(1) - index.SetActive(10) - assert.Equal(t, []byte{0b1000_0000, 0b0100_0000}, index.bitmap) - assert.Equal(t, uint32(1), index.firstBit) - assert.Equal(t, uint32(10), index.lastBit) - - index = &BitmapIndex{} - index.SetActive(2) - index.SetActive(17) - assert.Equal(t, []byte{0b0100_0000, 0b0000_0000, 0b1000_0000}, index.bitmap) - assert.Equal(t, uint32(2), index.firstBit) - assert.Equal(t, uint32(17), index.lastBit) - - index = &BitmapIndex{} - index.SetActive(17) - index.SetActive(26) - assert.Equal(t, []byte{0b1000_0000, 0b0100_0000}, index.bitmap) - assert.Equal(t, uint32(17), index.firstBit) - assert.Equal(t, uint32(26), index.lastBit) -} - -// TestSetInactive ensures that you can flip active bits off and the bitmap -// compresses in size accordingly. -func TestSetInactive(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(17) - index.SetActive(17 + 9) - index.SetActive(17 + 9 + 10) - assert.Equal(t, []byte{0b1000_0000, 0b0100_0000, 0b0001_0000}, index.bitmap) - - // disabling bits should work - index.SetInactive(17) - assert.False(t, index.isActive(17)) - - // it should trim off the first byte now - assert.Equal(t, []byte{0b0100_0000, 0b0001_0000}, index.bitmap) - assert.EqualValues(t, 17+9, index.firstBit) - assert.EqualValues(t, 17+9+10, index.lastBit) - - // it should compress empty bytes on shrink - index = &BitmapIndex{} - index.SetActive(1) - index.SetActive(1 + 2) - index.SetActive(1 + 9) - index.SetActive(1 + 9 + 8 + 9) - assert.Equal(t, []byte{0b1010_0000, 0b0100_0000, 0b0000_0000, 0b0010_0000}, index.bitmap) - - // ...from the left - index.SetInactive(1) - assert.Equal(t, []byte{0b0010_0000, 0b0100_0000, 0b0000_0000, 0b0010_0000}, index.bitmap) - index.SetInactive(3) - assert.Equal(t, []byte{0b0100_0000, 0b0000_0000, 0b0010_0000}, index.bitmap) - assert.EqualValues(t, 1+9, index.firstBit) - assert.EqualValues(t, 1+9+8+9, index.lastBit) - - // ...and the right - index.SetInactive(1 + 9 + 8 + 9) - assert.Equal(t, []byte{0b0100_0000}, index.bitmap) - assert.EqualValues(t, 1+9, index.firstBit) - assert.EqualValues(t, 1+9, index.lastBit) - - // ensure right-hand compression works for multiple bytes, too - index = &BitmapIndex{} - index.SetActive(2) - index.SetActive(2 + 2) - index.SetActive(2 + 9) - index.SetActive(2 + 9 + 8 + 6) - index.SetActive(2 + 9 + 8 + 9) - index.SetActive(2 + 9 + 8 + 10) - assert.Equal(t, []byte{0b0101_0000, 0b0010_0000, 0b0000_0000, 0b1001_1000}, index.bitmap) - - index.setInactive(2 + 9 + 8 + 10) - assert.Equal(t, []byte{0b0101_0000, 0b0010_0000, 0b0000_0000, 0b1001_0000}, index.bitmap) - assert.EqualValues(t, 2+9+8+9, index.lastBit) - - index.setInactive(2 + 9 + 8 + 9) - assert.Equal(t, []byte{0b0101_0000, 0b0010_0000, 0b0000_0000, 0b1000_0000}, index.bitmap) - assert.EqualValues(t, 2+9+8+6, index.lastBit) - - index.setInactive(2 + 9 + 8 + 6) - assert.Equal(t, []byte{0b0101_0000,
0b0010_0000}, index.bitmap) - assert.EqualValues(t, 2, index.firstBit) - assert.EqualValues(t, 2+9, index.lastBit) - - index.setInactive(2 + 2) - assert.Equal(t, []byte{0b0100_0000, 0b0010_0000}, index.bitmap) - assert.EqualValues(t, 2, index.firstBit) - assert.EqualValues(t, 2+9, index.lastBit) - - index.setInactive(1) // should be a no-op - assert.Equal(t, []byte{0b0100_0000, 0b0010_0000}, index.bitmap) - assert.EqualValues(t, 2, index.firstBit) - assert.EqualValues(t, 2+9, index.lastBit) -} - -// TestFuzzySetUnset attempts to fuzz random bits into two bitmaps, one built -// by addition and one by subtraction, and then compares the outcomes. -func TestFuzzySetUnset(t *testing.T) { - permLen := uint32(128) // should be a multiple of 8 - setBitsCount := permLen / 2 - - for n := 0; n < 10_000; n++ { - randBits := rand.Perm(int(permLen)) - setBits := randBits[:setBitsCount] - clearBits := randBits[setBitsCount:] - - // set all first, then clear the others - clearBitmap := &BitmapIndex{} - for i := uint32(1); i <= permLen; i++ { - clearBitmap.setActive(i) - } - - setBitmap := &BitmapIndex{} - for i := range setBits { - setBitmap.setActive(uint32(setBits[i]) + 1) - clearBitmap.setInactive(uint32(clearBits[i]) + 1) - } - - require.Equalf(t, setBitmap, clearBitmap, - "bitmaps aren't equal:\n%s", setBitmap.DebugCompare(clearBitmap)) - } -} - -func TestNextActive(t *testing.T) { - t.Run("empty", func(t *testing.T) { - index := &BitmapIndex{} - - i, err := index.NextActiveBit(0) - assert.Equal(t, uint32(0), i) - assert.EqualError(t, err, io.EOF.Error()) - }) - - t.Run("one byte", func(t *testing.T) { - t.Run("after last", func(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(3) - - // 16 is well-past the end - i, err := index.NextActiveBit(16) - assert.Equal(t, uint32(0), i) - assert.EqualError(t, err, io.EOF.Error()) - }) - - t.Run("only one bit in the byte", func(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(1) - - i, err := index.NextActiveBit(1) - assert.NoError(t, err) - assert.Equal(t, uint32(1), i) - }) - - t.Run("only one bit in the byte (offset)", func(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(9) - - i, err := index.NextActiveBit(1) - assert.NoError(t, err) - assert.Equal(t, uint32(9), i) - }) - - severalSet := &BitmapIndex{} - severalSet.SetActive(9) - severalSet.SetActive(11) - - t.Run("several bits set (first)", func(t *testing.T) { - i, err := severalSet.NextActiveBit(9) - assert.NoError(t, err) - assert.Equal(t, uint32(9), i) - }) - - t.Run("several bits set (second)", func(t *testing.T) { - i, err := severalSet.NextActiveBit(10) - assert.NoError(t, err) - assert.Equal(t, uint32(11), i) - }) - - t.Run("several bits set (second, inclusive)", func(t *testing.T) { - i, err := severalSet.NextActiveBit(11) - assert.NoError(t, err) - assert.Equal(t, uint32(11), i) - }) - }) - - t.Run("many bytes", func(t *testing.T) { - index := &BitmapIndex{} - index.SetActive(9) - index.SetActive(129) - - // Before the first - i, err := index.NextActiveBit(8) - assert.NoError(t, err) - assert.Equal(t, uint32(9), i) - - // at the first - i, err = index.NextActiveBit(9) - assert.NoError(t, err) - assert.Equal(t, uint32(9), i) - - // In the middle - i, err = index.NextActiveBit(11) - assert.NoError(t, err) - assert.Equal(t, uint32(129), i) - - // At the end - i, err = index.NextActiveBit(129) - assert.NoError(t, err) - assert.Equal(t, uint32(129), i) - - // after the end - i, err = index.NextActiveBit(130) - assert.EqualError(t, err, io.EOF.Error()) -
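// Editor's note: an added sketch showing how NextActiveBit (exercised above)
// supports forward iteration over every active bit, mirroring the unexported
// iterate() helper in bitmap.go. collectActiveBits is a hypothetical helper;
// it assumes only the BitmapIndex API shown in this diff.
func collectActiveBits(index *BitmapIndex) []uint32 {
	var active []uint32
	position := uint32(1) // bit positions are 1-based
	for {
		next, err := index.NextActiveBit(position)
		if err == io.EOF {
			return active // no bits remain at or after position
		} else if err != nil {
			return active
		}
		active = append(active, next)
		position = next + 1 // NextActiveBit is inclusive, so step past the hit
	}
}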
assert.Equal(t, uint32(0), i) - }) -} - -func TestMaxBitAfter(t *testing.T) { - for _, tc := range []struct { - b byte - after uint32 - shift uint32 - ok bool - }{ - {0b0000_0000, 0, 0, false}, - {0b0000_0000, 1, 0, false}, - {0b1000_0000, 0, 0, true}, - {0b0100_0000, 0, 1, true}, - {0b0100_0000, 1, 1, true}, - {0b0010_1000, 0, 2, true}, - {0b0010_1000, 1, 2, true}, - {0b0010_1000, 2, 2, true}, - {0b0010_1000, 3, 4, true}, - {0b0010_1000, 4, 4, true}, - {0b0000_0001, 7, 7, true}, - } { - t.Run(fmt.Sprintf("0b%b,%d", tc.b, tc.after), func(t *testing.T) { - shift, ok := maxBitAfter(tc.b, tc.after) - assert.Equal(t, tc.ok, ok) - assert.Equal(t, tc.shift, shift) - }) - } -} - -func TestMerge(t *testing.T) { - a := &BitmapIndex{} - require.NoError(t, a.SetActive(9)) - require.NoError(t, a.SetActive(129)) - - b := &BitmapIndex{} - require.NoError(t, b.SetActive(900)) - require.NoError(t, b.SetActive(1000)) - - var checkpoints []uint32 - b.iterate(func(c uint32) { - checkpoints = append(checkpoints, c) - }) - - assert.Equal(t, []uint32{900, 1000}, checkpoints) - - require.NoError(t, a.Merge(b)) - - assert.True(t, a.isActive(9)) - assert.True(t, a.isActive(129)) - assert.True(t, a.isActive(900)) - assert.True(t, a.isActive(1000)) - - checkpoints = []uint32{} - a.iterate(func(c uint32) { - checkpoints = append(checkpoints, c) - }) - - assert.Equal(t, []uint32{9, 129, 900, 1000}, checkpoints) -} diff --git a/exp/lighthorizon/index/types/trie.go b/exp/lighthorizon/index/types/trie.go deleted file mode 100644 index b5fc39c0ca..0000000000 --- a/exp/lighthorizon/index/types/trie.go +++ /dev/null @@ -1,345 +0,0 @@ -package index - -import ( - "bufio" - "encoding" - "io" - "sync" - - "github.com/stellar/go/xdr" -) - -const ( - TrieIndexVersion = 1 - - HeaderHasPrefix = 0b0000_0001 - HeaderHasValue = 0b0000_0010 - HeaderHasChildren = 0b0000_0100 -) - -type TrieIndex struct { - sync.RWMutex - Root *trieNode `json:"root"` -} - -// TODO: Store the suffix here so we can truncate the branches -type trieNode struct { - // Common prefix we ignore - Prefix []byte `json:"prefix,omitempty"` - - // The value of this node. - Value []byte `json:"value,omitempty"` - - // Any children of this node, mapped by the next byte of their path - Children map[byte]*trieNode `json:"children,omitempty"` -} - -func NewTrieIndexFromBytes(r io.Reader) (*TrieIndex, error) { - var index TrieIndex - if _, err := index.ReadFrom(r); err != nil { - return nil, err - } - return &index, nil -} - -func (index *TrieIndex) Upsert(key, value []byte) ([]byte, bool) { - if len(key) == 0 { - panic("len(key) must be > 0") - } - index.Lock() - defer index.Unlock() - return index.doUpsert(key, value) -} - -func (index *TrieIndex) doUpsert(key, value []byte) ([]byte, bool) { - if index.Root == nil { - index.Root = &trieNode{Prefix: key, Value: value} - return nil, false - } - - node := index.Root - var parent *trieNode - var parentIdx byte - splitPos := 0 - for len(key) > 0 { - for splitPos < len(node.Prefix) && len(key) > 0 { - if node.Prefix[splitPos] != key[0] { - break - } - splitPos++ - key = key[1:] - } - if splitPos != len(node.Prefix) { - // split this node - break - } - if len(key) == 0 { - // simple update-in-place at this node - break - } - - // Jump to the next child - parent = node - parentIdx = key[0] - child, ok := node.Children[key[0]] - if !ok { - if node.Children == nil { - node.Children = map[byte]*trieNode{} - } - // child doesn't exist. 
Insert a new node - node.Children[key[0]] = &trieNode{ - Prefix: key[1:], - Value: value, - } - return nil, false - } - node = child - key = key[1:] - splitPos = 0 - } - - // Key fully consumed just as we reached "node" - if len(key) == 0 { - if splitPos == len(node.Prefix) { - // node prefix matches (or is none), simple update-in-place - prev := node.Value - node.Value = value - return prev, true - } else { - // node has a prefix, so we need to insert a new one here and push it down - splitNode := &trieNode{ - Prefix: node.Prefix[:splitPos], // the matching segment - Value: value, - Children: map[byte]*trieNode{}, - } - splitNode.Children[node.Prefix[splitPos]] = node - node.Prefix = node.Prefix[splitPos+1:] // existing part that didn't match - if parent == nil { - index.Root = splitNode - } else { - parent.Children[parentIdx] = splitNode - } - return nil, false - } - } else { - // leftover key - if splitPos == len(node.Prefix) { - // new child - node.Children[key[0]] = &trieNode{ - Prefix: key[1:], - Value: value, - } - return nil, false - } else { - // Need to split the node - splitNode := &trieNode{ - Prefix: node.Prefix[:splitPos], - Children: map[byte]*trieNode{}, - } - splitNode.Children[node.Prefix[splitPos]] = node - splitNode.Children[key[0]] = &trieNode{Prefix: key[1:], Value: value} - node.Prefix = node.Prefix[splitPos+1:] - if parent == nil { - index.Root = splitNode - } else { - parent.Children[parentIdx] = splitNode - } - return nil, false - } - } -} - -func (index *TrieIndex) Get(key []byte) ([]byte, bool) { - index.RLock() - defer index.RUnlock() - if index.Root == nil { - return nil, false - } - - node := index.Root - splitPos := 0 - for len(key) > 0 { - for splitPos < len(node.Prefix) && len(key) > 0 { - if node.Prefix[splitPos] != key[0] { - break - } - splitPos++ - key = key[1:] - } - if splitPos != len(node.Prefix) { - // split this node - break - } - if len(key) == 0 { - // found it - return node.Value, true - } - - // Jump to the next child - child, ok := node.Children[key[0]] - if !ok { - // child doesn't exist - return nil, false - } - node = child - key = key[1:] - splitPos = 0 - } - - if len(key) == 0 { - return node.Value, true - } - return nil, false -} - -func (index *TrieIndex) Iterate(f func(key, value []byte)) { - index.RLock() - defer index.RUnlock() - if index.Root != nil { - index.Root.iterate(nil, f) - } -} - -func (node *trieNode) iterate(prefix []byte, f func(key, value []byte)) { - key := append(prefix, node.Prefix...) - if len(node.Value) > 0 { - f(key, node.Value) - } - - if node.Children != nil { - for b, child := range node.Children { - child.iterate(append(key, b), f) - } - } -} - -// TODO: For now this ignores duplicates. should it error? -func (i *TrieIndex) Merge(other *TrieIndex) error { - i.Lock() - defer i.Unlock() - - other.Iterate(func(key, value []byte) { - i.doUpsert(key, value) - }) - - return nil -} - -func (i *TrieIndex) MarshalBinary() ([]byte, error) { - i.RLock() - defer i.RUnlock() - - xdrRoot := xdr.TrieNode{} - - // Apparently this is possible? 
- if i.Root != nil { - xdrRoot.Prefix = i.Root.Prefix - xdrRoot.Value = i.Root.Value - xdrRoot.Children = make([]xdr.TrieNodeChild, 0, len(i.Root.Children)) - - for key, node := range i.Root.Children { - buildXdrTrie(key, node, &xdrRoot) - } - } - - xdrIndex := xdr.TrieIndex{Version: TrieIndexVersion, Root: xdrRoot} - return xdrIndex.MarshalBinary() -} - -func (i *TrieIndex) WriteTo(w io.Writer) (int64, error) { - // Locking is deferred to MarshalBinary; taking the read lock here as well - // would recursively RLock, which sync.RWMutex does not support. - bytes, err := i.MarshalBinary() - if err != nil { - return int64(len(bytes)), err - } - - count, err := w.Write(bytes) - return int64(count), err -} - -func (i *TrieIndex) UnmarshalBinary(bytes []byte) error { - // This mutates the trie, so take the write lock rather than the read lock. - i.Lock() - defer i.Unlock() - - xdrIndex := xdr.TrieIndex{} - err := xdrIndex.UnmarshalBinary(bytes) - if err != nil { - return err - } - - i.Root = &trieNode{ - Prefix: xdrIndex.Root.Prefix, - Value: xdrIndex.Root.Value, - Children: make(map[byte]*trieNode, len(xdrIndex.Root.Children)), - } - - for _, node := range xdrIndex.Root.Children { - buildTrie(&node, i.Root) - } - - return nil -} - -func (i *TrieIndex) ReadFrom(r io.Reader) (int64, error) { - // Locking is deferred to UnmarshalBinary, which takes the write lock. - br := bufio.NewReader(r) - bytes, err := io.ReadAll(br) - if err != nil { - return int64(len(bytes)), err - } - - return int64(len(bytes)), i.UnmarshalBinary(bytes) -} - -// buildTrie recursively builds the equivalent `TrieNode` structure from raw -// XDR, creating the key->value child mapping from the flat list of children. -// Here, `xdrNode` is the node we're processing and `parent` is its non-XDR -// parent (i.e. the parent was already converted from XDR). -// -// This is the opposite of buildXdrTrie. -func buildTrie(xdrNode *xdr.TrieNodeChild, parent *trieNode) { - node := &trieNode{ - Prefix: xdrNode.Node.Prefix, - Value: xdrNode.Node.Value, - Children: make(map[byte]*trieNode, len(xdrNode.Node.Children)), - } - parent.Children[xdrNode.Key[0]] = node - - for _, child := range xdrNode.Node.Children { - buildTrie(&child, node) - } -} - -// buildXdrTrie recursively builds the XDR-equivalent TrieNode structure, where -// `node` is the node we're converting and `parent` is the already-converted -// parent. That is, the non-XDR version of `parent` should have had (`key`, -// `node`) as a child. -// -// This is the opposite of buildTrie. -func buildXdrTrie(key byte, node *trieNode, parent *xdr.TrieNode) { - self := xdr.TrieNode{ - Prefix: node.Prefix, - Value: node.Value, - Children: make([]xdr.TrieNodeChild, 0, len(node.Children)), - } - - for key, node := range node.Children { - buildXdrTrie(key, node, &self) - } - - parent.Children = append(parent.Children, xdr.TrieNodeChild{ - Key: [1]byte{key}, - Node: self, - }) -} - -// Ensure we're compatible with stdlib interfaces.
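// Editor's note: an added usage sketch of the TrieIndex API defined above.
// The key and value bytes are made up for illustration, exampleTrieUsage is a
// hypothetical helper, and fmt is assumed beyond this file's own imports.
func exampleTrieUsage() {
	index := &TrieIndex{}
	prev, existed := index.Upsert([]byte("GABC"), []byte{1})
	fmt.Println(prev, existed) // [] false: first insert has no previous value
	prev, existed = index.Upsert([]byte("GABC"), []byte{2})
	fmt.Println(prev, existed) // [1] true: updated in place
	value, ok := index.Get([]byte("GABC"))
	fmt.Println(value, ok) // [2] true
}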
-var _ io.WriterTo = &TrieIndex{} -var _ io.ReaderFrom = &TrieIndex{} - -var _ encoding.BinaryMarshaler = &TrieIndex{} -var _ encoding.BinaryUnmarshaler = &TrieIndex{} diff --git a/exp/lighthorizon/index/types/trie_test.go b/exp/lighthorizon/index/types/trie_test.go deleted file mode 100644 index 8745296429..0000000000 --- a/exp/lighthorizon/index/types/trie_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package index - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "math/rand" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func randomTrie(t *testing.T, index *TrieIndex) (*TrieIndex, map[string]uint32) { - if index == nil { - index = &TrieIndex{} - } - inserts := map[string]uint32{} - numInserts := rand.Intn(100) - for j := 0; j < numInserts; j++ { - ledger := uint32(rand.Int63()) - hashBytes := make([]byte, 32) - if _, err := rand.Read(hashBytes); err != nil { - assert.NoError(t, err) - } - hash := hex.EncodeToString(hashBytes) - - inserts[hash] = ledger - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, ledger) - index.Upsert([]byte(hash), b) - } - return index, inserts -} - -func TestTrieIndex(t *testing.T) { - for i := 0; i < 10_000; i++ { - index, inserts := randomTrie(t, nil) - - for key, expected := range inserts { - value, ok := index.Get([]byte(key)) - require.Truef(t, ok, "Key not found: %s", key) - ledger := binary.BigEndian.Uint32(value) - assert.Equalf(t, expected, ledger, - "Key %s found: %v, expected: %v", key, ledger, expected) - } - } -} - -func TestTrieIndexUpsertBasic(t *testing.T) { - index := &TrieIndex{} - - key := "key" - prev, ok := index.Upsert([]byte(key), []byte("a")) - assert.Nil(t, prev) - assert.Falsef(t, ok, "expected nil, got prev: %q", string(prev)) - - prev, ok = index.Upsert([]byte(key), []byte("b")) - assert.Equal(t, "a", string(prev)) - assert.Truef(t, ok, "expected 'a', got prev: %q", string(prev)) - - prev, ok = index.Upsert([]byte(key), []byte("c")) - assert.Equal(t, "b", string(prev)) - assert.Truef(t, ok, "expected 'b', got prev: %q", string(prev)) -} - -func TestTrieIndexSuffixes(t *testing.T) { - index := &TrieIndex{} - - prev, ok := index.Upsert([]byte("a"), []byte("a")) - require.False(t, ok) - require.Nil(t, prev) - - prev, ok = index.Upsert([]byte("ab"), []byte("ab")) - require.False(t, ok) - require.Nil(t, prev) - - prev, ok = index.Get([]byte("a")) - require.True(t, ok) - require.Equal(t, "a", string(prev)) - - prev, ok = index.Get([]byte("ab")) - require.True(t, ok) - require.Equal(t, "ab", string(prev)) - - prev, ok = index.Upsert([]byte("a"), []byte("b")) - require.True(t, ok) - require.Equal(t, "a", string(prev)) - - prev, ok = index.Get([]byte("a")) - require.True(t, ok) - require.Equal(t, "b", string(prev)) -} - -func TestTrieIndexSerialization(t *testing.T) { - for i := 0; i < 10_000; i++ { - t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { - index, inserts := randomTrie(t, nil) - - // Round-trip it to serialization and back - buf := &bytes.Buffer{} - nWritten, err := index.WriteTo(buf) - assert.NoError(t, err) - - read := &TrieIndex{} - nRead, err := read.ReadFrom(buf) - assert.NoError(t, err) - - assert.Equal(t, nWritten, nRead, "read more or less than we wrote") - - for key, expected := range inserts { - value, ok := read.Get([]byte(key)) - require.Truef(t, ok, "Key not found: %s", key) - - ledger := binary.BigEndian.Uint32(value) - assert.Equal(t, expected, ledger, "for key %s", key) - } - }) - } -} - -func requireEqualNodes(t 
*testing.T, expectedNode, gotNode *trieNode) { - expectedJSON, err := json.Marshal(expectedNode) - require.NoError(t, err) - expected := map[string]interface{}{} - require.NoError(t, json.Unmarshal(expectedJSON, &expected)) - - gotJSON, err := json.Marshal(gotNode) - require.NoError(t, err) - got := map[string]interface{}{} - require.NoError(t, json.Unmarshal(gotJSON, &got)) - - require.Equal(t, expected, got) -} - -func TestTrieIndexUpsertAdvanced(t *testing.T) { - // TODO: This is janky that we inspect the structure, but I want to make sure - // I've gotten the algorithms correct. - makeBase := func() *TrieIndex { - index := &TrieIndex{} - index.Upsert([]byte("annibale"), []byte{1}) - index.Upsert([]byte("annibalesco"), []byte{2}) - return index - } - - t.Run("base", func(t *testing.T) { - base := makeBase() - - baseExpected := &trieNode{ - Prefix: []byte("annibale"), - Value: []byte{1}, - Children: map[byte]*trieNode{ - byte('s'): { - Prefix: []byte("co"), - Value: []byte{2}, - }, - }, - } - requireEqualNodes(t, baseExpected, base.Root) - }) - - for _, tc := range []struct { - key string - expected *trieNode - }{ - {"annientare", &trieNode{ - Prefix: []byte("anni"), - Children: map[byte]*trieNode{ - 'b': { - Prefix: []byte("ale"), - Value: []byte{1}, - Children: map[byte]*trieNode{ - 's': { - Prefix: []byte("co"), - Value: []byte{2}, - }, - }, - }, - 'e': { - Prefix: []byte("ntare"), - Value: []byte{3}, - }, - }, - }}, - {"annibali", &trieNode{ - Prefix: []byte("annibal"), - Children: map[byte]*trieNode{ - 'e': { - Value: []byte{1}, - Children: map[byte]*trieNode{ - 's': { - Prefix: []byte("co"), - Value: []byte{2}, - }, - }, - }, - 'i': { - Value: []byte{3}, - }, - }, - }}, - {"ago", &trieNode{ - Prefix: []byte("a"), - Children: map[byte]*trieNode{ - 'n': { - Prefix: []byte("nibale"), - Value: []byte{1}, - Children: map[byte]*trieNode{ - 's': { - Prefix: []byte("co"), - Value: []byte{2}, - }, - }, - }, - 'g': { - Prefix: []byte("o"), - Value: []byte{3}, - }, - }, - }}, - {"ciao", &trieNode{ - Children: map[byte]*trieNode{ - 'a': { - Prefix: []byte("nnibale"), - Value: []byte{1}, - Children: map[byte]*trieNode{ - 's': { - Prefix: []byte("co"), - Value: []byte{2}, - }, - }, - }, - 'c': { - Prefix: []byte("iao"), - Value: []byte{3}, - }, - }, - }}, - {"anni", &trieNode{ - Prefix: []byte("anni"), - Value: []byte{3}, - Children: map[byte]*trieNode{ - 'b': { - Prefix: []byte("ale"), - Value: []byte{1}, - Children: map[byte]*trieNode{ - 's': { - Prefix: []byte("co"), - Value: []byte{2}, - }, - }, - }, - }, - }}, - } { - t.Run(tc.key, func(t *testing.T) { - // Do our upsert - index := makeBase() - index.Upsert([]byte(tc.key), []byte{3}) - - // Check the tree is shaped right - requireEqualNodes(t, tc.expected, index.Root) - - // Check the value matches expected - value, ok := index.Get([]byte(tc.key)) - require.True(t, ok) - require.Equal(t, []byte{3}, value) - }) - } -} - -func TestTrieIndexMerge(t *testing.T) { - for i := 0; i < 10_000; i++ { - a, aInserts := randomTrie(t, nil) - b, bInserts := randomTrie(t, nil) - - require.NoError(t, a.Merge(b)) - - // Should still have all the A keys - for key, expected := range aInserts { - value, ok := a.Get([]byte(key)) - require.Truef(t, ok, "Key not found: %s", key) - ledger := binary.BigEndian.Uint32(value) - assert.Equalf(t, expected, ledger, "Key %s found", key) - } - - // Should now also have all the B keys - for key, expected := range bInserts { - value, ok := a.Get([]byte(key)) - require.Truef(t, ok, "Key not found: %s", key) - ledger := 
binary.BigEndian.Uint32(value) - assert.Equalf(t, expected, ledger, "Key %s found", key) - } - } -} diff --git a/exp/lighthorizon/ingester/ingester.go b/exp/lighthorizon/ingester/ingester.go deleted file mode 100644 index 21bb400b50..0000000000 --- a/exp/lighthorizon/ingester/ingester.go +++ /dev/null @@ -1,55 +0,0 @@ -package ingester - -import ( - "context" - - "github.com/stellar/go/ingest" - "github.com/stellar/go/metaarchive" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/xdr" -) - -type IngesterConfig struct { - SourceUrl string - NetworkPassphrase string - - CacheDir string - CacheSize int - - ParallelDownloads uint -} - -type liteIngester struct { - metaarchive.MetaArchive - networkPassphrase string -} - -func (i *liteIngester) PrepareRange(ctx context.Context, r historyarchive.Range) error { - return nil -} - -func (i *liteIngester) NewLedgerTransactionReader( - ledgerCloseMeta xdr.SerializedLedgerCloseMeta, -) (LedgerTransactionReader, error) { - reader, err := ingest.NewLedgerTransactionReaderFromLedgerCloseMeta( - i.networkPassphrase, - ledgerCloseMeta.MustV0()) - - return &liteLedgerTransactionReader{reader}, err -} - -type liteLedgerTransactionReader struct { - *ingest.LedgerTransactionReader -} - -func (reader *liteLedgerTransactionReader) Read() (LedgerTransaction, error) { - ingestedTx, err := reader.LedgerTransactionReader.Read() - if err != nil { - return LedgerTransaction{}, err - } - return LedgerTransaction{LedgerTransaction: &ingestedTx}, nil -} - -var _ Ingester = (*liteIngester)(nil) // ensure conformity to the interface -var _ LedgerTransactionReader = (*liteLedgerTransactionReader)(nil) diff --git a/exp/lighthorizon/ingester/main.go b/exp/lighthorizon/ingester/main.go deleted file mode 100644 index a93636c67a..0000000000 --- a/exp/lighthorizon/ingester/main.go +++ /dev/null @@ -1,87 +0,0 @@ -package ingester - -import ( - "context" - "fmt" - "net/url" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/ingest" - "github.com/stellar/go/metaarchive" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" - "github.com/stellar/go/support/storage" - "github.com/stellar/go/xdr" -) - -// -// LightHorizon data model -// - -// Ingester combines a source of unpacked ledger metadata and a way to create a -// ingestion reader interface on top of it. -type Ingester interface { - metaarchive.MetaArchive - - PrepareRange(ctx context.Context, r historyarchive.Range) error - NewLedgerTransactionReader( - ledgerCloseMeta xdr.SerializedLedgerCloseMeta, - ) (LedgerTransactionReader, error) -} - -// For now, this mirrors the `ingest` library exactly, but it's replicated so -// that we can diverge in the future if necessary. -type LedgerTransaction struct { - *ingest.LedgerTransaction -} - -type LedgerTransactionReader interface { - Read() (LedgerTransaction, error) -} - -func NewIngester(config IngesterConfig) (Ingester, error) { - if config.CacheSize <= 0 { - return nil, fmt.Errorf("invalid cache size: %d", config.CacheSize) - } - - // Now, set up a simple filesystem-like access to the backend and wrap it in - // a local on-disk LRU cache if we can. 
- source, err := historyarchive.ConnectBackend( - config.SourceUrl, - storage.ConnectOptions{Context: context.Background()}, - ) - if err != nil { - return nil, errors.Wrapf(err, "failed to connect to %s", config.SourceUrl) - } - - parsed, err := url.Parse(config.SourceUrl) - if err != nil { - return nil, errors.Wrapf(err, "%s is not a valid URL", config.SourceUrl) - } - - if parsed.Scheme != "file" { // otherwise, already on-disk - cache, errr := storage.MakeOnDiskCache(source, config.CacheDir, uint(config.CacheSize)) - - if errr != nil { // non-fatal: warn but continue w/o cache - log.WithField("path", config.CacheDir).WithError(errr). - Warnf("Failed to create cached ledger backend") - } else { - log.WithField("path", config.CacheDir). - Infof("On-disk cache configured") - source = cache - } - } - - if config.ParallelDownloads > 1 { - log.Infof("Enabling parallel ledger fetches with %d workers", config.ParallelDownloads) - return NewParallelIngester( - metaarchive.NewMetaArchive(source), - config.NetworkPassphrase, - config.ParallelDownloads), nil - } - - return &liteIngester{ - MetaArchive: metaarchive.NewMetaArchive(source), - networkPassphrase: config.NetworkPassphrase, - }, nil -} diff --git a/exp/lighthorizon/ingester/mock_ingester.go b/exp/lighthorizon/ingester/mock_ingester.go deleted file mode 100644 index 62c377ce78..0000000000 --- a/exp/lighthorizon/ingester/mock_ingester.go +++ /dev/null @@ -1,44 +0,0 @@ -package ingester - -import ( - "context" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/xdr" - "github.com/stretchr/testify/mock" -) - -type MockIngester struct { - mock.Mock -} - -func (m *MockIngester) NewLedgerTransactionReader( - ledgerCloseMeta xdr.SerializedLedgerCloseMeta, -) (LedgerTransactionReader, error) { - args := m.Called(ledgerCloseMeta) - return args.Get(0).(LedgerTransactionReader), args.Error(1) -} - -func (m *MockIngester) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { - args := m.Called(ctx) - return args.Get(0).(uint32), args.Error(1) -} - -func (m *MockIngester) GetLedger(ctx context.Context, sequence uint32) (xdr.SerializedLedgerCloseMeta, error) { - args := m.Called(ctx, sequence) - return args.Get(0).(xdr.SerializedLedgerCloseMeta), args.Error(1) -} - -func (m *MockIngester) PrepareRange(ctx context.Context, r historyarchive.Range) error { - args := m.Called(ctx, r) - return args.Error(0) -} - -type MockLedgerTransactionReader struct { - mock.Mock -} - -func (m *MockLedgerTransactionReader) Read() (LedgerTransaction, error) { - args := m.Called() - return args.Get(0).(LedgerTransaction), args.Error(1) -} diff --git a/exp/lighthorizon/ingester/parallel_ingester.go b/exp/lighthorizon/ingester/parallel_ingester.go deleted file mode 100644 index 133b0a37c4..0000000000 --- a/exp/lighthorizon/ingester/parallel_ingester.go +++ /dev/null @@ -1,141 +0,0 @@ -package ingester - -import ( - "context" - "sync" - "time" - - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/metaarchive" - "github.com/stellar/go/support/collections/set" - "github.com/stellar/go/support/log" - "github.com/stellar/go/xdr" -) - -type parallelIngester struct { - liteIngester - - ledgerFeed sync.Map // thread-safe version of map[uint32]downloadState - ledgerQueue set.ISet[uint32] - - workQueue chan uint32 - signalChan chan error -} - -type downloadState struct { - ledger xdr.SerializedLedgerCloseMeta - err error -} - -// NewParallelIngester creates an ingester on the given `ledgerSource` using the -// given `networkPassphrase` that can 
download ledgers in parallel via -// `workerCount` workers via `PrepareRange()`. -func NewParallelIngester( - archive metaarchive.MetaArchive, - networkPassphrase string, - workerCount uint, -) *parallelIngester { - self := ¶llelIngester{ - liteIngester: liteIngester{ - MetaArchive: archive, - networkPassphrase: networkPassphrase, - }, - ledgerFeed: sync.Map{}, - ledgerQueue: set.NewSafeSet[uint32](64), - workQueue: make(chan uint32, workerCount), - signalChan: make(chan error), - } - - // These are the workers that download & store ledgers in memory. - for j := uint(0); j < workerCount; j++ { - go func(jj uint) { - for ledgerSeq := range self.workQueue { - start := time.Now() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - txmeta, err := self.liteIngester.GetLedger(ctx, ledgerSeq) - cancel() - - log.WithField("duration", time.Since(start)). - WithField("worker", jj).WithError(err). - Debugf("Downloaded ledger %d", ledgerSeq) - - self.ledgerFeed.Store(ledgerSeq, downloadState{txmeta, err}) - self.signalChan <- err - } - }(j) - } - - return self -} - -// PrepareRange will create a set of parallel worker routines that feed ledgers -// to a channel in the order they're downloaded and store the results in an -// array. You can use this to download ledgers in parallel to fetching them -// individually via `GetLedger()`. `PrepareRange()` is thread-safe. -// -// Note: The passed in range `r` is inclusive of the boundaries. -func (i *parallelIngester) PrepareRange(ctx context.Context, r historyarchive.Range) error { - // The taskmaster adds ledger sequence numbers to the work queue. - go func() { - start := time.Now() - defer func() { - log.WithField("duration", time.Since(start)). - WithError(ctx.Err()). - Infof("Download of ledger range: [%d, %d] (%d ledgers) complete", - r.Low, r.High, r.Size()) - }() - - for seq := r.Low; seq <= r.High; seq++ { - if ctx.Err() != nil { - log.Warnf("Cancelling remaining downloads ([%d, %d]): %v", - seq, r.High, ctx.Err()) - break - } - - // Adding this to the "set of ledgers being downloaded in parallel" - // means that if a GetLedger() request happens in this range but - // outside of the realm of processing, it can be prioritized by the - // normal, direct download. - i.ledgerQueue.Add(seq) - - i.workQueue <- seq // blocks until there's an available worker - - // We don't remove from the queue here, preferring to remove when - // it's actually pulled from the worker. Removing here would mean - // you could have multiple instances of a ledger download happening. - } - }() - - return nil -} - -func (i *parallelIngester) GetLedger( - ctx context.Context, ledgerSeq uint32, -) (xdr.SerializedLedgerCloseMeta, error) { - // If the requested ledger is out of the queued up ranges, we can fall back - // to the default non-parallel download method. - if !i.ledgerQueue.Contains(ledgerSeq) { - return i.liteIngester.GetLedger(ctx, ledgerSeq) - } - - // If the ledger isn't available yet, wait for the download worker. 
- var err error - for err == nil { - if iState, ok := i.ledgerFeed.Load(ledgerSeq); ok { - state := iState.(downloadState) - i.ledgerFeed.Delete(ledgerSeq) - i.ledgerQueue.Remove(ledgerSeq) - return state.ledger, state.err - } - - select { - case err = <-i.signalChan: // blocks until another ledger downloads - case <-ctx.Done(): - err = ctx.Err() - } - } - - return xdr.SerializedLedgerCloseMeta{}, err -} - -var _ Ingester = (*parallelIngester)(nil) // ensure conformity to the interface diff --git a/exp/lighthorizon/ingester/participants.go b/exp/lighthorizon/ingester/participants.go deleted file mode 100644 index ebc49173cf..0000000000 --- a/exp/lighthorizon/ingester/participants.go +++ /dev/null @@ -1,35 +0,0 @@ -package ingester - -import ( - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/support/collections/set" - "github.com/stellar/go/xdr" -) - -// GetTransactionParticipants takes a LedgerTransaction and returns a set of all -// participants (accounts) in the transaction. If there is any error, it will -// return nil and the error. -func GetTransactionParticipants(tx LedgerTransaction) (set.Set[string], error) { - participants, err := index.GetTransactionParticipants(*tx.LedgerTransaction) - if err != nil { - return nil, err - } - set := set.NewSet[string](len(participants)) - set.AddSlice(participants) - return set, nil -} - -// GetOperationParticipants takes a LedgerTransaction, the Operation within the -// transaction, and the 0-based index of the operation within the transaction. -// It will return a set of all participants (accounts) in the operation. If -// there is any error, it will return nil and the error. -func GetOperationParticipants(tx LedgerTransaction, op xdr.Operation, opIndex int) (set.Set[string], error) { - participants, err := index.GetOperationParticipants(*tx.LedgerTransaction, op, opIndex) - if err != nil { - return nil, err - } - - set := set.NewSet[string](len(participants)) - set.AddSlice(participants) - return set, nil -} diff --git a/exp/lighthorizon/main.go b/exp/lighthorizon/main.go deleted file mode 100644 index f7c502d465..0000000000 --- a/exp/lighthorizon/main.go +++ /dev/null @@ -1,183 +0,0 @@ -package main - -import ( - "context" - "net/http" - - "github.com/go-chi/chi" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/stellar/go/exp/lighthorizon/actions" - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/exp/lighthorizon/services" - "github.com/stellar/go/exp/lighthorizon/tools" - - "github.com/stellar/go/network" - "github.com/stellar/go/support/log" -) - -const ( - HorizonLiteVersion = "0.0.1-alpha" - defaultCacheSize = (60 * 60 * 24) / 6 // 1 day of ledgers @ 6s each -) - -func main() { - log.SetLevel(logrus.InfoLevel) // default for subcommands - - cmd := &cobra.Command{ - Use: "lighthorizon ", - Long: "Horizon Lite command suite", - RunE: func(cmd *cobra.Command, args []string) error { - return cmd.Usage() // require a subcommand - }, - } - - serve := &cobra.Command{ - Use: "serve ", - Long: `Starts the Horizon Lite server, binding it to port 8080 on all -local interfaces of the host. You can refer to the OpenAPI documentation located -at the /api endpoint to see what endpoints are supported. 
-
-The first argument should be a URL to meta archives from which to read unpacked
-ledger files, while the second should be a URL containing indices that
-break down accounts by active ledgers.`,
- Run: func(cmd *cobra.Command, args []string) {
- if len(args) != 2 {
- cmd.Usage()
- return
- }
-
- sourceUrl, indexStoreUrl := args[0], args[1]
-
- networkPassphrase, _ := cmd.Flags().GetString("network-passphrase")
- switch networkPassphrase {
- case "testnet":
- networkPassphrase = network.TestNetworkPassphrase
- case "pubnet":
- networkPassphrase = network.PublicNetworkPassphrase
- }
-
- cacheDir, _ := cmd.Flags().GetString("ledger-cache")
- cacheSize, _ := cmd.Flags().GetUint("ledger-cache-size")
- logLevelParam, _ := cmd.Flags().GetString("log-level")
- downloadCount, _ := cmd.Flags().GetUint("parallel-downloads")
-
- L := log.WithField("service", "horizon-lite")
- logLevel, err := logrus.ParseLevel(logLevelParam)
- if err != nil {
- log.Warnf("Failed to parse log level '%s', defaulting to 'info'.", logLevelParam)
- logLevel = log.InfoLevel
- }
- L.SetLevel(logLevel)
- L.Info("Starting lighthorizon!")
-
- registry := prometheus.NewRegistry()
- indexStore, err := index.ConnectWithConfig(index.StoreConfig{
- URL: indexStoreUrl,
- Log: L.WithField("service", "index"),
- Metrics: registry,
- })
- if err != nil {
- log.Fatal(err)
- return
- }
-
- ingester, err := ingester.NewIngester(ingester.IngesterConfig{
- SourceUrl: sourceUrl,
- NetworkPassphrase: networkPassphrase,
- CacheDir: cacheDir,
- CacheSize: int(cacheSize),
- ParallelDownloads: downloadCount,
- })
- if err != nil {
- log.Fatal(err)
- return
- }
-
- latestLedger, err := ingester.GetLatestLedgerSequence(context.Background())
- if err != nil {
- log.Fatalf("Failed to retrieve latest ledger from %s: %v", sourceUrl, err)
- return
- }
- log.Infof("The latest ledger stored at %s is %d.", sourceUrl, latestLedger)
-
- cachePreloadCount, _ := cmd.Flags().GetUint32("ledger-cache-preload")
- cachePreloadStart, _ := cmd.Flags().GetUint32("ledger-cache-preload-start")
- if cachePreloadCount > 0 {
- if cacheDir == "" {
- log.Fatalf("--ledger-cache-preload=%d specified but no "+
- "--ledger-cache directory provided.",
- cachePreloadCount)
- return
- } else {
- startLedger := int(latestLedger) - int(cachePreloadCount)
- if cachePreloadStart > 0 {
- startLedger = int(cachePreloadStart)
- }
- if startLedger <= 0 {
- log.Warnf("Starting ledger invalid (%d), defaulting to 2.",
- startLedger)
- startLedger = 2
- }
-
- log.Infof("Preloading cache at %s with %d ledgers, starting at ledger %d.",
- cacheDir, cachePreloadCount, startLedger)
- go func() {
- tools.BuildCache(sourceUrl, cacheDir,
- uint32(startLedger), cachePreloadCount, false)
- }()
- }
- }
-
- Config := services.Config{
- Ingester: ingester,
- Passphrase: networkPassphrase,
- IndexStore: indexStore,
- Metrics: services.NewMetrics(registry),
- }
-
- lightHorizon := services.LightHorizon{
- Transactions: &services.TransactionRepository{
- Config: Config,
- },
- Operations: &services.OperationRepository{
- Config: Config,
- },
- }
-
- // Inject our config into the root response.
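// A worked example (hypothetical numbers, not from the deleted file) of the
// preload-window arithmetic above: with latestLedger = 1_000_000 and
// --ledger-cache-preload=14_400 (roughly a day of ledgers at ~6s each),
// caching starts at ledger 1_000_000 - 14_400 = 985_600; an explicit
// --ledger-cache-preload-start overrides the computed start, and any
// non-positive result is clamped to ledger 2:
//
//	latest, count, explicit := uint32(1_000_000), uint32(14_400), uint32(0)
//	start := int(latest) - int(count) // 985_600
//	if explicit > 0 {
//		start = int(explicit)
//	}
//	if start <= 0 {
//		start = 2
//	}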
- router := lightHorizonHTTPHandler(registry, lightHorizon).(*chi.Mux) - router.MethodFunc(http.MethodGet, "/", actions.Root(actions.RootResponse{ - Version: HorizonLiteVersion, - LedgerSource: sourceUrl, - IndexSource: indexStoreUrl, - - LatestLedger: latestLedger, - })) - - log.Fatal(http.ListenAndServe(":8080", router)) - }, - } - - serve.Flags().String("log-level", "info", - "logging level: 'info', 'debug', 'warn', 'error', 'panic', 'fatal', or 'trace'") - serve.Flags().String("network-passphrase", "pubnet", "network passphrase") - serve.Flags().String("ledger-cache", "", "path to cache frequently-used ledgers; "+ - "if left empty, uses a temporary directory") - serve.Flags().Uint("ledger-cache-size", defaultCacheSize, - "number of ledgers to store in the cache") - serve.Flags().Uint32("ledger-cache-preload", 0, - "should the cache come preloaded with the latest ledgers?") - serve.Flags().Uint32("ledger-cache-preload-start", 0, - "the preload should start at ledger ") - serve.Flags().Uint("parallel-downloads", 1, - "how many workers should download ledgers in parallel?") - - cmd.AddCommand(serve) - tools.AddCacheCommands(cmd) - tools.AddIndexCommands(cmd) - cmd.Execute() -} diff --git a/exp/lighthorizon/services/cursor.go b/exp/lighthorizon/services/cursor.go deleted file mode 100644 index 8f2d2b0b5c..0000000000 --- a/exp/lighthorizon/services/cursor.go +++ /dev/null @@ -1,102 +0,0 @@ -package services - -import ( - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/toid" -) - -// CursorManager describes a way to control how a cursor advances for a -// particular indexing strategy. -type CursorManager interface { - Begin(cursor int64) (int64, error) - Advance(times uint) (int64, error) -} - -type AccountActivityCursorManager struct { - AccountId string - - store index.Store - lastCursor *toid.ID -} - -func NewCursorManagerForAccountActivity(store index.Store, accountId string) *AccountActivityCursorManager { - return &AccountActivityCursorManager{AccountId: accountId, store: store} -} - -func (c *AccountActivityCursorManager) Begin(cursor int64) (int64, error) { - freq := checkpointManager.GetCheckpointFrequency() - id := toid.Parse(cursor) - lastCheckpoint := uint32(0) - if id.LedgerSequence >= int32(checkpointManager.GetCheckpointFrequency()) { - lastCheckpoint = index.GetCheckpointNumber(uint32(id.LedgerSequence)) - } - - // We shouldn't take the provided cursor for granted: instead, we should - // skip ahead to the first active ledger that's >= the given cursor. - // - // For example, someone might say ?cursor=0 but the first active checkpoint - // is actually 40M ledgers in. - firstCheckpoint, err := c.store.NextActive(c.AccountId, allTransactionsIndex, lastCheckpoint) - if err != nil { - return cursor, err - } - - nextLedger := (firstCheckpoint - 1) * freq - - // However, if the given cursor is actually *more* specific than the index - // can give us (e.g. somewhere *within* an active checkpoint range), prefer - // it rather than starting over. - if nextLedger < uint32(id.LedgerSequence) { - better := toid.Parse(cursor) - c.lastCursor = &better - return cursor, nil - } - - c.lastCursor = toid.New(int32(nextLedger), 1, 1) - return c.lastCursor.ToInt64(), nil -} - -func (c *AccountActivityCursorManager) Advance(times uint) (int64, error) { - if c.lastCursor == nil { - panic("invalid cursor, call Begin() first") - } - - // - // Advancing the cursor means deciding whether or not we need to query - // the index. 
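// A worked example of the checkpoint-to-ledger mapping this relies on
// (assuming the default checkpoint frequency of 64): an active checkpoint k
// covers ledgers [(k-1)*64, k*64), so a hit from the index is translated
// back into a concrete cursor roughly like so:
//
//	freq := uint32(64)
//	checkpoint := uint32(3)                // e.g. returned by NextActive()
//	firstLedger := (checkpoint - 1) * freq // 128
//	cursor := toid.New(int32(firstLedger), 1, 1).ToInt64()
//	_ = cursor // packs (ledger, tx, op) = (128, 1, 1) into an int64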
- // - freq := checkpointManager.GetCheckpointFrequency() - - for i := uint(1); i <= times; i++ { - lastLedger := uint32(c.lastCursor.LedgerSequence) - - if checkpointManager.IsCheckpoint(lastLedger) { - // If the last cursor we looked at was a checkpoint ledger, then we - // need to jump ahead to the next checkpoint. Note that NextActive() - // is "inclusive" so if the parameter is an active checkpoint it - // will return itself. - checkpoint := index.GetCheckpointNumber(uint32(c.lastCursor.LedgerSequence)) - checkpoint, err := c.store.NextActive(c.AccountId, allTransactionsIndex, checkpoint+1) - if err != nil { - return c.lastCursor.ToInt64(), err - } - - // We add a -1 here because an active checkpoint indicates that an - // account had activity in the *previous* 64 ledgers, so we need to - // backtrack to that ledger range. - c.lastCursor = toid.New(int32((checkpoint-1)*freq), 1, 1) - } else { - // Otherwise, we can just bump the ledger number. - c.lastCursor = toid.New(int32(lastLedger+1), 1, 1) - } - } - - return c.lastCursor.ToInt64(), nil -} - -var _ CursorManager = (*AccountActivityCursorManager)(nil) // ensure conformity to the interface - -// getLedgerFromCursor is a helpful way to turn a cursor into a ledger number -func getLedgerFromCursor(cursor int64) uint32 { - return uint32(toid.Parse(cursor).LedgerSequence) -} diff --git a/exp/lighthorizon/services/cursor_test.go b/exp/lighthorizon/services/cursor_test.go deleted file mode 100644 index 2112ae3715..0000000000 --- a/exp/lighthorizon/services/cursor_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package services - -import ( - "io" - "testing" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/keypair" - "github.com/stellar/go/toid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - checkpointMgr = historyarchive.NewCheckpointManager(0) -) - -func TestAccountTransactionCursorManager(t *testing.T) { - freq := int32(checkpointMgr.GetCheckpointFrequency()) - accountId := keypair.MustRandom().Address() - - // Create an index and fill it with some checkpoint details. 
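// The fixtures below mark checkpoints {1, 5, 10, 12} active. Under the
// checkpoint k -> ledgers [(k-1)*freq, k*freq) mapping, those are the
// windows the assertions depend on (freq = 64 by default):
//
//	checkpoint 1  -> ledgers [0, freq)          // first Begin() lands here
//	checkpoint 5  -> ledgers [4*freq, 5*freq)   // the 4*freq jump
//	checkpoint 10 -> ledgers [9*freq, 10*freq)  // next active after 5
//	checkpoint 12 -> ledgers [11*freq, 12*freq) // EOF past 12*freq - 1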
- tmp := t.TempDir() - store, err := index.NewFileStore(tmp, - index.StoreConfig{ - URL: "file://" + tmp, - Workers: 4, - }, - ) - require.NoError(t, err) - - for _, checkpoint := range []uint32{1, 5, 10, 12} { - require.NoError(t, store.AddParticipantsToIndexes( - checkpoint, allTransactionsIndex, []string{accountId})) - } - - cursorMgr := NewCursorManagerForAccountActivity(store, accountId) - - cursor := toid.New(1, 1, 1) - var nextCursor int64 - - // first checkpoint works - nextCursor, err = cursorMgr.Begin(cursor.ToInt64()) - require.NoError(t, err) - assert.EqualValues(t, 1, getLedgerFromCursor(nextCursor)) - - // cursor is preserved if mid-active-range - cursor.LedgerSequence = freq / 2 - nextCursor, err = cursorMgr.Begin(cursor.ToInt64()) - require.NoError(t, err) - assert.EqualValues(t, cursor.LedgerSequence, getLedgerFromCursor(nextCursor)) - - // cursor jumps ahead if not active - cursor.LedgerSequence = 2 * freq - nextCursor, err = cursorMgr.Begin(cursor.ToInt64()) - require.NoError(t, err) - assert.EqualValues(t, 4*freq, getLedgerFromCursor(nextCursor)) - - // cursor increments - for i := int32(1); i < freq; i++ { - nextCursor, err = cursorMgr.Advance(1) - require.NoError(t, err) - assert.EqualValues(t, 4*freq+i, getLedgerFromCursor(nextCursor)) - } - - // cursor jumps to next active checkpoint - nextCursor, err = cursorMgr.Advance(1) - require.NoError(t, err) - assert.EqualValues(t, 9*freq, getLedgerFromCursor(nextCursor)) - - // cursor skips - nextCursor, err = cursorMgr.Advance(5) - require.NoError(t, err) - assert.EqualValues(t, 9*freq+5, getLedgerFromCursor(nextCursor)) - - // cursor jumps to next active when skipping - nextCursor, err = cursorMgr.Advance(uint(freq - 5)) - require.NoError(t, err) - assert.EqualValues(t, 11*freq, getLedgerFromCursor(nextCursor)) - - // cursor EOFs at the end - nextCursor, err = cursorMgr.Advance(uint(freq - 1)) - require.NoError(t, err) - assert.EqualValues(t, 12*freq-1, getLedgerFromCursor(nextCursor)) - _, err = cursorMgr.Advance(1) - assert.ErrorIs(t, err, io.EOF) - - // cursor EOFs if skipping past the end - rewind := toid.New(int32(getLedgerFromCursor(nextCursor)-5), 0, 0) - nextCursor, err = cursorMgr.Begin(rewind.ToInt64()) - require.NoError(t, err) - assert.EqualValues(t, rewind.LedgerSequence, getLedgerFromCursor(nextCursor)) - _, err = cursorMgr.Advance(uint(freq)) - assert.ErrorIs(t, err, io.EOF) -} diff --git a/exp/lighthorizon/services/main.go b/exp/lighthorizon/services/main.go deleted file mode 100644 index d391fc8baf..0000000000 --- a/exp/lighthorizon/services/main.go +++ /dev/null @@ -1,216 +0,0 @@ -package services - -import ( - "context" - "io" - "time" - - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/exp/constraints" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" - "github.com/stellar/go/xdr" -) - -const ( - allTransactionsIndex = "all/all" - allPaymentsIndex = "all/payments" - slowFetchDurationThreshold = time.Second -) - -var ( - checkpointManager = historyarchive.NewCheckpointManager(0) -) - -// NewMetrics returns a Metrics instance containing all the prometheus -// metrics necessary for running light horizon services. 
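// For reference, a sketch of how the repositories feed this histogram,
// mirroring the pattern in operations.go and transactions.go further down
// (`metrics`, `age`, and `err` stand in for the surrounding request state):
//
//	if age >= 0 { // a negative age means "no results, nothing to observe"
//		metrics.ResponseAgeHistogram.With(prometheus.Labels{
//			"request":    "GetOperationsByAccount",
//			"successful": strconv.FormatBool(err == nil),
//		}).Observe(age)
//	}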
-func NewMetrics(registry *prometheus.Registry) Metrics { - const minute = 60 - const day = 24 * 60 * minute - responseAgeHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "horizon_lite", - Subsystem: "services", - Name: "response_age", - Buckets: []float64{ - 5 * minute, - 60 * minute, - day, - 7 * day, - 30 * day, - 90 * day, - 180 * day, - 365 * day, - }, - Help: "Age of the response for each service, sliding window = 10m", - }, - []string{"request", "successful"}, - ) - registry.MustRegister(responseAgeHistogram) - return Metrics{ - ResponseAgeHistogram: responseAgeHistogram, - } -} - -type LightHorizon struct { - Operations OperationService - Transactions TransactionService -} - -type Metrics struct { - ResponseAgeHistogram *prometheus.HistogramVec -} - -type Config struct { - Ingester ingester.Ingester - IndexStore index.Store - Passphrase string - Metrics Metrics -} - -// searchCallback is a generic way for any endpoint to process a transaction and -// its corresponding ledger. It should return whether or not we should stop -// processing (e.g. when a limit is reached) and any error that occurred. -type searchCallback func(ingester.LedgerTransaction, *xdr.LedgerHeader) (finished bool, err error) - -func searchAccountTransactions(ctx context.Context, - cursor int64, - accountId string, - config Config, - callback searchCallback, -) error { - cursorMgr := NewCursorManagerForAccountActivity(config.IndexStore, accountId) - cursor, err := cursorMgr.Begin(cursor) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - nextLedger := getLedgerFromCursor(cursor) - - log.WithField("cursor", cursor). - Debugf("Searching %s for account %s starting at ledger %d", - allTransactionsIndex, accountId, nextLedger) - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - fullStart := time.Now() - fetchDuration := time.Duration(0) - processDuration := time.Duration(0) - indexFetchDuration := time.Duration(0) - count := int64(0) - - defer func() { - log.WithField("ledgers", count). - WithField("ledger-fetch", fetchDuration). - WithField("ledger-process", processDuration). - WithField("index-fetch", indexFetchDuration). - WithField("avg-ledger-fetch", getAverageDuration(fetchDuration, count)). - WithField("avg-ledger-process", getAverageDuration(processDuration, count)). - WithField("avg-index-fetch", getAverageDuration(indexFetchDuration, count)). - WithField("total", time.Since(fullStart)). - Infof("Fulfilled request for account %s at cursor %d", accountId, cursor) - }() - - checkpointMgr := historyarchive.NewCheckpointManager(0) - - for { - if checkpointMgr.IsCheckpoint(nextLedger) { - r := historyarchive.Range{ - Low: nextLedger, - High: checkpointMgr.NextCheckpoint(nextLedger + 1), - } - log.Infof("Preparing ledger range [%d, %d]", r.Low, r.High) - if innerErr := config.Ingester.PrepareRange(ctx, r); innerErr != nil { - log.Errorf("failed to prepare ledger range [%d, %d]: %v", - r.Low, r.High, innerErr) - } - } - - start := time.Now() - ledger, innerErr := config.Ingester.GetLedger(ctx, nextLedger) - - // TODO: We should have helpful error messages when innerErr points to a - // 404 for that particular ledger, since that situation shouldn't happen - // under normal operations, but rather indicates a problem with the - // backing archive. 
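// For context, the searchCallback contract driving this loop: return
// (true, nil) to stop cleanly (e.g. a page limit was hit), (false, nil) to
// keep scanning, or a non-nil error to abort. A minimal illustrative
// callback (hypothetical, not from the deleted file) that collects at most
// ten matches:
//
//	matches := 0
//	cb := func(tx ingester.LedgerTransaction, hdr *xdr.LedgerHeader) (bool, error) {
//		matches++
//		return matches >= 10, nil
//	}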
- if innerErr != nil { - return errors.Wrapf(innerErr, - "failed to retrieve ledger %d from archive", nextLedger) - } - count++ - thisFetchDuration := time.Since(start) - if thisFetchDuration > slowFetchDurationThreshold { - log.WithField("duration", thisFetchDuration). - Warnf("Fetching ledger %d was really slow", nextLedger) - } - fetchDuration += thisFetchDuration - - start = time.Now() - reader, innerErr := config.Ingester.NewLedgerTransactionReader(ledger) - if innerErr != nil { - return errors.Wrapf(innerErr, - "failed to read ledger %d", nextLedger) - } - - for { - if ctx.Err() != nil { - return ctx.Err() - } - - tx, readErr := reader.Read() - if readErr == io.EOF { - break - } else if readErr != nil { - return readErr - } - - // Note: If we move to ledger-based indices, we don't need this, - // since we have a guarantee that the transaction will contain - // the account as a participant. - participants, participantErr := ingester.GetTransactionParticipants(tx) - if participantErr != nil { - return participantErr - } - - if _, found := participants[accountId]; found { - finished, callBackErr := callback(tx, &ledger.V0.V0.LedgerHeader.Header) - if callBackErr != nil { - return callBackErr - } else if finished { - processDuration += time.Since(start) - return nil - } - } - } - - processDuration += time.Since(start) - start = time.Now() - - cursor, err = cursorMgr.Advance(1) - if err != nil && err != io.EOF { - return err - } - - nextLedger = getLedgerFromCursor(cursor) - indexFetchDuration += time.Since(start) - if err == io.EOF { - break - } - } - - return nil -} - -func getAverageDuration[ - T constraints.Signed | constraints.Float, -](d time.Duration, count T) time.Duration { - if count == 0 { - return 0 // don't bomb on div-by-zero - } - return time.Duration(int64(float64(d.Nanoseconds()) / float64(count))) -} diff --git a/exp/lighthorizon/services/main_test.go b/exp/lighthorizon/services/main_test.go deleted file mode 100644 index a8a3958214..0000000000 --- a/exp/lighthorizon/services/main_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package services - -import ( - "context" - "io" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/ingest" - "github.com/stellar/go/toid" - "github.com/stellar/go/xdr" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -var ( - passphrase = "White New England clam chowder" - accountId = "GDCXSQPVE45DVGT2ZRFFIIHSJ2EJED65W6AELGWIDRMPMWNXCEBJ4FKX" - startLedgerSeq = 1586112 -) - -func TestItGetsTransactionsByAccount(t *testing.T) { - ctx := context.Background() - - // this is in the checkpoint range prior to the first active checkpoint - ledgerSeq := checkpointMgr.PrevCheckpoint(uint32(startLedgerSeq)) - cursor := toid.New(int32(ledgerSeq), 1, 1).ToInt64() - - t.Run("first", func(tt *testing.T) { - txService := newTransactionService(ctx) - - txs, err := txService.GetTransactionsByAccount(ctx, cursor, 1, accountId) - require.NoError(tt, err) - require.Len(tt, txs, 1) - require.Equal(tt, txs[0].LedgerHeader.LedgerSeq, xdr.Uint32(1586113)) - require.EqualValues(tt, txs[0].TxIndex, 2) - }) - - t.Run("without cursor", func(tt *testing.T) { - txService := newTransactionService(ctx) - - txs, err := txService.GetTransactionsByAccount(ctx, 0, 1, accountId) - require.NoError(tt, err) - require.Len(tt, txs, 1) - require.Equal(tt, txs[0].LedgerHeader.LedgerSeq, xdr.Uint32(1586113)) - 
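// Where 1586113 comes from, as worked arithmetic (checkpoint frequency 64):
//
//	1586112 / 64 = 24783, so GetCheckpointNumber(1586112) = 24784
//	checkpoint 24784 covers ledgers [1586112, 1586176)
//
// and the mock fixtures below give the target account its first activity in
// the second ledger of that window, hence LedgerSeq 1586113 and TxIndex 2.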
require.EqualValues(tt, txs[0].TxIndex, 2) - }) - - t.Run("with limit", func(tt *testing.T) { - txService := newTransactionService(ctx) - - txs, err := txService.GetTransactionsByAccount(ctx, cursor, 5, accountId) - require.NoError(tt, err) - require.Len(tt, txs, 2) - require.Equal(tt, txs[0].LedgerHeader.LedgerSeq, xdr.Uint32(1586113)) - require.EqualValues(tt, txs[0].TxIndex, 2) - require.Equal(tt, txs[1].LedgerHeader.LedgerSeq, xdr.Uint32(1586114)) - require.EqualValues(tt, txs[1].TxIndex, 1) - }) -} - -func TestItGetsOperationsByAccount(t *testing.T) { - ctx := context.Background() - - // this is in the checkpoint range prior to the first active checkpoint - ledgerSeq := checkpointMgr.PrevCheckpoint(uint32(startLedgerSeq)) - cursor := toid.New(int32(ledgerSeq), 1, 1).ToInt64() - - t.Run("first", func(tt *testing.T) { - opsService := newOperationService(ctx) - - // this should start at next checkpoint - ops, err := opsService.GetOperationsByAccount(ctx, cursor, 1, accountId) - require.NoError(tt, err) - require.Len(tt, ops, 1) - require.Equal(tt, ops[0].LedgerHeader.LedgerSeq, xdr.Uint32(1586113)) - require.Equal(tt, ops[0].TxIndex, int32(2)) - - }) - - t.Run("with limit", func(tt *testing.T) { - opsService := newOperationService(ctx) - - // this should start at next checkpoint - ops, err := opsService.GetOperationsByAccount(ctx, cursor, 5, accountId) - require.NoError(tt, err) - require.Len(tt, ops, 2) - require.Equal(tt, ops[0].LedgerHeader.LedgerSeq, xdr.Uint32(1586113)) - require.Equal(tt, ops[0].TxIndex, int32(2)) - require.Equal(tt, ops[1].LedgerHeader.LedgerSeq, xdr.Uint32(1586114)) - require.Equal(tt, ops[1].TxIndex, int32(1)) - }) -} - -func mockArchiveAndIndex(ctx context.Context) (ingester.Ingester, index.Store) { - mockArchive := &ingester.MockIngester{} - mockReaderLedger1 := &ingester.MockLedgerTransactionReader{} - mockReaderLedger2 := &ingester.MockLedgerTransactionReader{} - mockReaderLedger3 := &ingester.MockLedgerTransactionReader{} - mockReaderLedgerTheRest := &ingester.MockLedgerTransactionReader{} - - expectedLedger1 := testLedger(startLedgerSeq) - expectedLedger2 := testLedger(startLedgerSeq + 1) - expectedLedger3 := testLedger(startLedgerSeq + 2) - - // throw an irrelevant account in there to make sure it's filtered - source := xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU") - source2 := xdr.MustAddress(accountId) - - // assert results iterate sequentially across ops-tx-ledgers - expectedLedger1Tx1 := testLedgerTx(source, 1, 34, 35) - expectedLedger1Tx2 := testLedgerTx(source, 2, 34) - expectedLedger2Tx1 := testLedgerTx(source, 1, 34) - expectedLedger2Tx2 := testLedgerTx(source2, 2, 34) - expectedLedger3Tx1 := testLedgerTx(source2, 1, 34) - expectedLedger3Tx2 := testLedgerTx(source, 2, 34) - - mockReaderLedger1. - On("Read").Return(expectedLedger1Tx1, nil).Once(). - On("Read").Return(expectedLedger1Tx2, nil).Once(). - On("Read").Return(ingester.LedgerTransaction{}, io.EOF).Once() - - mockReaderLedger2. - On("Read").Return(expectedLedger2Tx1, nil).Once(). - On("Read").Return(expectedLedger2Tx2, nil).Once(). - On("Read").Return(ingester.LedgerTransaction{}, io.EOF).Once() - - mockReaderLedger3. - On("Read").Return(expectedLedger3Tx1, nil).Once(). - On("Read").Return(expectedLedger3Tx2, nil).Once(). - On("Read").Return(ingester.LedgerTransaction{}, io.EOF).Once() - - mockReaderLedgerTheRest. - On("Read").Return(ingester.LedgerTransaction{}, io.EOF) - - mockArchive. 
- On("GetLedger", mock.Anything, uint32(1586112)).Return(expectedLedger1, nil). - On("GetLedger", mock.Anything, uint32(1586113)).Return(expectedLedger2, nil). - On("GetLedger", mock.Anything, uint32(1586114)).Return(expectedLedger3, nil). - On("GetLedger", mock.Anything, mock.AnythingOfType("uint32")). - Return(xdr.SerializedLedgerCloseMeta{}, nil) - - mockArchive. - On("NewLedgerTransactionReader", expectedLedger1).Return(mockReaderLedger1, nil).Once(). - On("NewLedgerTransactionReader", expectedLedger2).Return(mockReaderLedger2, nil).Once(). - On("NewLedgerTransactionReader", expectedLedger3).Return(mockReaderLedger3, nil).Once(). - On("NewLedgerTransactionReader", mock.AnythingOfType("xdr.SerializedLedgerCloseMeta")). - Return(mockReaderLedgerTheRest, nil). - On("PrepareRange", mock.Anything, mock.Anything).Return(nil) - - // should be 24784 - activeChk := uint32(index.GetCheckpointNumber(uint32(startLedgerSeq))) - mockStore := &index.MockStore{} - mockStore. - On("NextActive", accountId, mock.Anything, uint32(0)).Return(activeChk, nil). // start - On("NextActive", accountId, mock.Anything, activeChk-1).Return(activeChk, nil). // prev - On("NextActive", accountId, mock.Anything, activeChk).Return(activeChk, nil). // curr - On("NextActive", accountId, mock.Anything, activeChk+1).Return(uint32(0), io.EOF) // next - - return mockArchive, mockStore -} - -func testLedger(seq int) xdr.SerializedLedgerCloseMeta { - return xdr.SerializedLedgerCloseMeta{ - V: 0, - V0: &xdr.LedgerCloseMeta{ - V0: &xdr.LedgerCloseMetaV0{ - LedgerHeader: xdr.LedgerHeaderHistoryEntry{ - Header: xdr.LedgerHeader{ - LedgerSeq: xdr.Uint32(seq), - }, - }, - }, - }, - } -} - -func testLedgerTx(source xdr.AccountId, txIndex uint32, bumpTos ...int) ingester.LedgerTransaction { - code := xdr.TransactionResultCodeTxSuccess - - operations := []xdr.Operation{} - for _, bumpTo := range bumpTos { - operations = append(operations, xdr.Operation{ - Body: xdr.OperationBody{ - Type: xdr.OperationTypeBumpSequence, - BumpSequenceOp: &xdr.BumpSequenceOp{ - BumpTo: xdr.SequenceNumber(bumpTo), - }, - }, - }) - } - - return ingester.LedgerTransaction{ - LedgerTransaction: &ingest.LedgerTransaction{ - Result: xdr.TransactionResultPair{ - TransactionHash: xdr.Hash{}, - Result: xdr.TransactionResult{ - Result: xdr.TransactionResultResult{ - Code: code, - InnerResultPair: &xdr.InnerTransactionResultPair{}, - Results: &[]xdr.OperationResult{}, - }, - }, - }, - Envelope: xdr.TransactionEnvelope{ - Type: xdr.EnvelopeTypeEnvelopeTypeTx, - V1: &xdr.TransactionV1Envelope{ - Tx: xdr.Transaction{ - SourceAccount: source.ToMuxedAccount(), - Operations: operations, - }, - }, - }, - UnsafeMeta: xdr.TransactionMeta{ - V: 2, - V2: &xdr.TransactionMetaV2{ - Operations: make([]xdr.OperationMeta, len(bumpTos)), - }, - }, - Index: txIndex, - }, - } -} - -func newTransactionService(ctx context.Context) TransactionService { - ingest, store := mockArchiveAndIndex(ctx) - return &TransactionRepository{ - Config: Config{ - Ingester: ingest, - IndexStore: store, - Passphrase: passphrase, - Metrics: NewMetrics(prometheus.NewRegistry()), - }, - } -} - -func newOperationService(ctx context.Context) OperationService { - ingest, store := mockArchiveAndIndex(ctx) - return &OperationRepository{ - Config: Config{ - Ingester: ingest, - IndexStore: store, - Passphrase: passphrase, - Metrics: NewMetrics(prometheus.NewRegistry()), - }, - } -} diff --git a/exp/lighthorizon/services/mock_services.go b/exp/lighthorizon/services/mock_services.go deleted file mode 100644 index 
be573489e0..0000000000 --- a/exp/lighthorizon/services/mock_services.go +++ /dev/null @@ -1,32 +0,0 @@ -package services - -import ( - "context" - - "github.com/stellar/go/exp/lighthorizon/common" - "github.com/stretchr/testify/mock" -) - -type MockTransactionService struct { - mock.Mock -} - -func (m *MockTransactionService) GetTransactionsByAccount(ctx context.Context, - cursor int64, limit uint64, - accountId string, -) ([]common.Transaction, error) { - args := m.Called(ctx, cursor, limit, accountId) - return args.Get(0).([]common.Transaction), args.Error(1) -} - -type MockOperationService struct { - mock.Mock -} - -func (m *MockOperationService) GetOperationsByAccount(ctx context.Context, - cursor int64, limit uint64, - accountId string, -) ([]common.Operation, error) { - args := m.Called(ctx, cursor, limit, accountId) - return args.Get(0).([]common.Operation), args.Error(1) -} diff --git a/exp/lighthorizon/services/operations.go b/exp/lighthorizon/services/operations.go deleted file mode 100644 index 1236bcdb01..0000000000 --- a/exp/lighthorizon/services/operations.go +++ /dev/null @@ -1,90 +0,0 @@ -package services - -import ( - "context" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stellar/go/exp/lighthorizon/common" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/support/log" - "github.com/stellar/go/xdr" -) - -type OperationService interface { - GetOperationsByAccount(ctx context.Context, - cursor int64, limit uint64, - accountId string, - ) ([]common.Operation, error) -} - -type OperationRepository struct { - OperationService - Config Config -} - -func (or *OperationRepository) GetOperationsByAccount(ctx context.Context, - cursor int64, limit uint64, - accountId string, -) ([]common.Operation, error) { - ops := []common.Operation{} - - opsCallback := func(tx ingester.LedgerTransaction, ledgerHeader *xdr.LedgerHeader) (bool, error) { - for operationOrder, op := range tx.Envelope.Operations() { - opParticipants, err := ingester.GetOperationParticipants(tx, op, operationOrder) - if err != nil { - return false, err - } - - if _, foundInOp := opParticipants[accountId]; foundInOp { - ops = append(ops, common.Operation{ - TransactionEnvelope: &tx.Envelope, - TransactionResult: &tx.Result.Result, - LedgerHeader: ledgerHeader, - TxIndex: int32(tx.Index), - OpIndex: int32(operationOrder), - }) - - if uint64(len(ops)) == limit { - return true, nil - } - } - } - - return false, nil - } - - err := searchAccountTransactions(ctx, cursor, accountId, or.Config, opsCallback) - if age := operationsResponseAgeSeconds(ops); age >= 0 { - or.Config.Metrics.ResponseAgeHistogram.With(prometheus.Labels{ - "request": "GetOperationsByAccount", - "successful": strconv.FormatBool(err == nil), - }).Observe(age) - } - - return ops, err -} - -func operationsResponseAgeSeconds(ops []common.Operation) float64 { - if len(ops) == 0 { - return -1 - } - - oldest := ops[0].LedgerHeader.ScpValue.CloseTime - for i := 1; i < len(ops); i++ { - if closeTime := ops[i].LedgerHeader.ScpValue.CloseTime; closeTime < oldest { - oldest = closeTime - } - } - - lastCloseTime := time.Unix(int64(oldest), 0).UTC() - now := time.Now().UTC() - if now.Before(lastCloseTime) { - log.Errorf("current time %v is before oldest operation close time %v", now, lastCloseTime) - return -1 - } - return now.Sub(lastCloseTime).Seconds() -} - -var _ OperationService = (*OperationRepository)(nil) // ensure conformity to the interface diff --git 
a/exp/lighthorizon/services/transactions.go b/exp/lighthorizon/services/transactions.go deleted file mode 100644 index 42d3964614..0000000000 --- a/exp/lighthorizon/services/transactions.go +++ /dev/null @@ -1,76 +0,0 @@ -package services - -import ( - "context" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stellar/go/exp/lighthorizon/common" - "github.com/stellar/go/exp/lighthorizon/ingester" - "github.com/stellar/go/support/log" - "github.com/stellar/go/xdr" -) - -type TransactionRepository struct { - TransactionService - Config Config -} - -type TransactionService interface { - GetTransactionsByAccount(ctx context.Context, - cursor int64, limit uint64, - accountId string, - ) ([]common.Transaction, error) -} - -func (tr *TransactionRepository) GetTransactionsByAccount(ctx context.Context, - cursor int64, limit uint64, - accountId string, -) ([]common.Transaction, error) { - txs := []common.Transaction{} - - txsCallback := func(tx ingester.LedgerTransaction, ledgerHeader *xdr.LedgerHeader) (bool, error) { - txs = append(txs, common.Transaction{ - LedgerTransaction: &tx, - LedgerHeader: ledgerHeader, - TxIndex: int32(tx.Index), - NetworkPassphrase: tr.Config.Passphrase, - }) - - return uint64(len(txs)) == limit, nil - } - - err := searchAccountTransactions(ctx, cursor, accountId, tr.Config, txsCallback) - if age := transactionsResponseAgeSeconds(txs); age >= 0 { - tr.Config.Metrics.ResponseAgeHistogram.With(prometheus.Labels{ - "request": "GetTransactionsByAccount", - "successful": strconv.FormatBool(err == nil), - }).Observe(age) - } - - return txs, err -} - -func transactionsResponseAgeSeconds(txs []common.Transaction) float64 { - if len(txs) == 0 { - return -1 - } - - oldest := txs[0].LedgerHeader.ScpValue.CloseTime - for i := 1; i < len(txs); i++ { - if closeTime := txs[i].LedgerHeader.ScpValue.CloseTime; closeTime < oldest { - oldest = closeTime - } - } - - lastCloseTime := time.Unix(int64(oldest), 0).UTC() - now := time.Now().UTC() - if now.Before(lastCloseTime) { - log.Errorf("current time %v is before oldest transaction close time %v", now, lastCloseTime) - return -1 - } - return now.Sub(lastCloseTime).Seconds() -} - -var _ TransactionService = (*TransactionRepository)(nil) // ensure conformity to the interface diff --git a/exp/lighthorizon/tools/cache.go b/exp/lighthorizon/tools/cache.go deleted file mode 100644 index 0290fcb164..0000000000 --- a/exp/lighthorizon/tools/cache.go +++ /dev/null @@ -1,270 +0,0 @@ -package tools - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/spf13/cobra" - - "github.com/stellar/go/metaarchive" - "github.com/stellar/go/support/log" - "github.com/stellar/go/support/storage" -) - -const ( - defaultCacheCount = (60 * 60 * 24) / 5 // ~24hrs worth of ledgers -) - -func AddCacheCommands(parent *cobra.Command) *cobra.Command { - cmd := &cobra.Command{ - Use: "cache", - Long: "Manages the on-disk cache of ledgers.", - Example: ` -cache build --start 1234 --count 1000 s3://txmeta /tmp/example -cache purge /tmp/example 1234 1300 -cache show /tmp/example`, - RunE: func(cmd *cobra.Command, args []string) error { - // require a subcommand - this is just a "category" - return cmd.Help() - }, - } - - purge := &cobra.Command{ - Use: "purge [flags] path ", - Long: "Purges individual ledgers (or ranges) from the cache, or the entire cache.", - Example: ` -purge /tmp/example # empty the whole cache -purge /tmp/example 1000 # purge one ledger -purge 
/tmp/example 1000 1005 # purge a ledger range`, - RunE: func(cmd *cobra.Command, args []string) error { - // The first parameter must be a valid cache directory. - // You can then pass nothing, a single ledger, or a ledger range. - if len(args) < 1 || len(args) > 3 { - return cmd.Usage() - } - - var err error - var start, end uint64 - if len(args) > 1 { - start, err = strconv.ParseUint(args[1], 10, 32) - if err != nil { - cmd.Printf("Error: '%s' not a ledger sequence: %v\n", args[1], err) - return cmd.Usage() - } - } - end = start // fallback - - if len(args) == 3 { - end, err = strconv.ParseUint(args[2], 10, 32) - if err != nil { - cmd.Printf("Error: '%s' not a ledger sequence: %v\n", args[2], err) - return cmd.Usage() - } else if end < start { - cmd.Printf("Error: end precedes start (%d < %d)\n", end, start) - return cmd.Usage() - } - } - - path := args[0] - if start > 0 { - return PurgeLedgers(path, uint32(start), uint32(end)) - } - return PurgeCache(path) - }, - } - show := &cobra.Command{ - Use: "show ", - Long: "Traverses the on-disk cache and prints out cached ledger ranges.", - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return cmd.Usage() - } - return ShowCache(args[0]) - }, - } - build := &cobra.Command{ - Use: "build [flags] ", - Example: "See cache --help text", - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 2 { - cmd.Println("Error: 2 positional arguments are required") - return cmd.Usage() - } - - start, err := cmd.Flags().GetUint32("start") - if err != nil || start < 2 { - cmd.Println("--start is required to be a ledger sequence") - return cmd.Usage() - } - - count, err := cmd.Flags().GetUint32("count") - if err != nil || count <= 0 { - cmd.Println("--count should be a positive 32-bit integer") - return cmd.Usage() - } - repair, _ := cmd.Flags().GetBool("repair") - return BuildCache(args[0], args[1], start, count, repair) - }, - } - - build.Flags().Bool("repair", false, "attempt to purge the cache and retry ledgers that error") - build.Flags().Uint32("start", 0, "first ledger to cache (required)") - build.Flags().Uint32("count", defaultCacheCount, "number of ledgers to cache") - - cmd.AddCommand(build, purge, show) - if parent == nil { - return cmd - } - - parent.AddCommand(cmd) - return parent -} - -func BuildCache(ledgerSource, cacheDir string, start uint32, count uint32, repair bool) error { - fullStart := time.Now() - L := log.DefaultLogger - L.SetLevel(log.InfoLevel) - log := L - - ctx := context.Background() - store, err := storage.ConnectBackend(ledgerSource, storage.ConnectOptions{ - Context: ctx, - Wrap: func(store storage.Storage) (storage.Storage, error) { - return storage.MakeOnDiskCache(store, cacheDir, uint(count)) - }, - }) - if err != nil { - log.Errorf("Couldn't create local cache for '%s' at '%s': %v", - ledgerSource, cacheDir, err) - return err - } - - log.Infof("Connected to ledger source at %s", ledgerSource) - log.Infof("Connected to ledger cache at %s", cacheDir) - - source := metaarchive.NewMetaArchive(store) - log.Infof("Filling local cache of ledgers at %s...", cacheDir) - log.Infof("Ledger range: [%d, %d] (%d ledgers)", - start, start+count-1, count) - - successful := uint(0) - for i := uint32(0); i < count; i++ { - ledgerSeq := start + uint32(i) - - // do "best effort" caching, skipping if too slow - dlCtx, dlCancel := context.WithTimeout(ctx, 10*time.Second) - start := time.Now() - - _, err := source.GetLedger(dlCtx, ledgerSeq) // this caches - dlCancel() - - if err != nil { - if repair 
&& strings.Contains(err.Error(), "xdr") { - log.Warnf("Caching ledger %d failed, purging & retrying: %v", ledgerSeq, err) - store.(*storage.OnDiskCache).Evict(fmt.Sprintf("ledgers/%d", ledgerSeq)) - i-- // retry - } else { - log.Warnf("Caching ledger %d failed, skipping: %v", ledgerSeq, err) - log.Warn("If you see an XDR decoding error, the cache may be corrupted.") - log.Warnf("Run '%s purge %d' and try again, or pass --repair", - filepath.Base(os.Args[0]), ledgerSeq) - } - continue - } else { - successful++ - } - - duration := time.Since(start) - if duration > 2*time.Second { - log.WithField("duration", duration). - Warnf("Downloading ledger %d took a while.", ledgerSeq) - } - - log = log.WithField("failures", 1+uint(i)-successful) - if successful%97 == 0 { - log.Infof("Cached %d/%d ledgers (%0.1f%%)", successful, count, - 100*float64(successful)/float64(count)) - } - } - - duration := time.Since(fullStart) - log.WithField("duration", duration). - Infof("Cached %d ledgers into %s", successful, cacheDir) - - return nil -} - -func PurgeLedgers(cacheDir string, start, end uint32) error { - base := filepath.Join(cacheDir, "ledgers") - - successful := 0 - for i := start; i <= end; i++ { - ledgerPath := filepath.Join(base, strconv.FormatUint(uint64(i), 10)) - if err := os.Remove(ledgerPath); err != nil { - log.Warnf("Failed to remove cached ledger %d: %v", i, err) - continue - } - os.Remove(storage.NameLockfile(ledgerPath)) // ignore lockfile errors - log.Debugf("Purged ledger from %s", ledgerPath) - successful++ - } - - log.Infof("Purged %d cached ledgers from %s", successful, cacheDir) - return nil -} - -func PurgeCache(cacheDir string) error { - if err := os.RemoveAll(cacheDir); err != nil { - log.Warnf("Failed to remove cache directory (%s): %v", cacheDir, err) - return err - } - - log.Infof("Purged cache at %s", cacheDir) - return nil -} - -func ShowCache(cacheDir string) error { - files, err := ioutil.ReadDir(filepath.Join(cacheDir, "ledgers")) - if err != nil { - log.Errorf("Failed to read cache: %v", err) - return err - } - - ledgers := make([]uint32, 0, len(files)) - - for _, f := range files { - if f.IsDir() { - continue - } - - // If the name can be converted to a ledger sequence, track it. 
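// To illustrate the range grouping printed below (hypothetical cache
// contents): filenames parsing to sequences {10, 11, 12, 40, 41} would be
// reported as two consecutive ranges:
//
//	- [10, 12]
//	- [40, 41]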
- if seq, errr := strconv.ParseUint(f.Name(), 10, 32); errr == nil { - ledgers = append(ledgers, uint32(seq)) - } - } - - log.Infof("Analyzed cache at %s: %d cached ledgers.", cacheDir, len(ledgers)) - if len(ledgers) == 0 { - return nil - } - - // Find consecutive ranges of ledgers in the cache - log.Infof("Cached ranges:") - firstSeq, lastSeq := ledgers[0], ledgers[0] - for i := 1; i < len(ledgers); i++ { - if ledgers[i]-1 != lastSeq { - log.Infof(" - [%d, %d]", firstSeq, lastSeq) - firstSeq = ledgers[i] - } - lastSeq = ledgers[i] - } - - log.Infof(" - [%d, %d]", firstSeq, lastSeq) - return nil -} diff --git a/exp/lighthorizon/tools/index.go b/exp/lighthorizon/tools/index.go deleted file mode 100644 index e37a7eb38a..0000000000 --- a/exp/lighthorizon/tools/index.go +++ /dev/null @@ -1,356 +0,0 @@ -package tools - -import ( - "context" - "io" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" - - "github.com/spf13/cobra" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/strkey" - "github.com/stellar/go/support/collections/maps" - "github.com/stellar/go/support/collections/set" - "github.com/stellar/go/support/log" - "github.com/stellar/go/support/ordered" -) - -var ( - checkpointMgr = historyarchive.NewCheckpointManager(0) -) - -func AddIndexCommands(parent *cobra.Command) *cobra.Command { - cmd := &cobra.Command{ - Use: "index", - Long: "Lets you view details about an index source and modify it.", - Example: ` -index view file:///tmp/indices -index view file:///tmp/indices GAGJZWQ5QT34VK3U6W6YKRYFIK6YSAXQC6BHIIYLG6X3CE5QW2KAYNJR -index stats file:///tmp/indices`, - RunE: func(cmd *cobra.Command, args []string) error { - // require a subcommand - this is just a "category" - return cmd.Help() - }, - } - - stats := &cobra.Command{ - Use: "stats ", - Long: "Summarize the statistics (like the # of active checkpoints " + - "or accounts). Note that this is a very read-heavy operation and " + - "will incur download bandwidth costs if reading from remote, " + - "billable sources.", - Example: `stats s3://indices`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return cmd.Usage() - } - - path := args[0] - start := time.Now() - log.Infof("Analyzing indices at %s", path) - - allCheckpoints := set.Set[uint32]{} - allIndexNames := set.Set[string]{} - accounts := showAccounts(path, 0) - log.Infof("Analyzing indices for %d accounts.", len(accounts)) - - // We want to summarize as much as possible on a Ctrl+C event, so - // this handles that by setting up a context that gets cancelled on - // SIGINT. A second Ctrl+C will kill the process as usual. - // - // https://millhouse.dev/posts/graceful-shutdowns-in-golang-with-signal-notify-context - ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM, syscall.SIGINT) - defer stop() - go func() { - <-ctx.Done() - stop() - log.WithField("error", ctx.Err()). 
- Warn("Received interrupt, shutting down gracefully & summarizing findings...") - log.Warn("Press Ctrl+C again to abort.") - }() - - mostActiveAccountChk := 0 - mostActiveAccount := "" - for _, account := range accounts { - if ctx.Err() != nil { - break - } - - activity := getIndex(path, account, "", 0) - allCheckpoints.AddSlice(maps.Keys(activity)) - for _, names := range activity { - allIndexNames.AddSlice(names) - } - - if len(activity) > mostActiveAccountChk { - mostActiveAccount = account - mostActiveAccountChk = len(activity) - } - } - - ledgerCount := len(allCheckpoints) * int(checkpointMgr.GetCheckpointFrequency()) - - log.Info("Done analyzing indices, summarizing...") - log.Infof("") - log.Infof("=== Final Summary ===") - log.Infof("Analysis took %s.", time.Since(start)) - log.Infof("Path: %s", path) - log.Infof("Accounts: %d", len(accounts)) - log.Infof("Smallest checkpoint: %d", ordered.MinSlice(allCheckpoints.Slice())) - log.Infof("Largest checkpoint: %d", ordered.MaxSlice(allCheckpoints.Slice())) - log.Infof("Checkpoint count: %d (%d possible ledgers, ~%0.2f days)", - len(allCheckpoints), ledgerCount, - float64(ledgerCount)/(float64(60*60*24)/6.0) /* approx. ledgers per day */) - log.Infof("Index names: %s", strings.Join(allIndexNames.Slice(), ", ")) - log.Infof("Most active account: %s (%d checkpoints)", - mostActiveAccount, mostActiveAccountChk) - - return nil - }, - } - - view := &cobra.Command{ - Use: "view [accounts?]", - Long: "View the accounts in an index source or view the " + - "checkpoints specific account(s) are active in.", - Example: `view s3://indices -view s3:///indices GAXLQGKIUAIIUHAX4GJO3J7HFGLBCNF6ZCZSTLJE7EKO5IUHGLQLMXZO -view file:///tmp/indices --limit=0 GAXLQGKIUAIIUHAX4GJO3J7HFGLBCNF6ZCZSTLJE7EKO5IUHGLQLMXZO -view gcs://indices --limit=10 GAXLQGKIUAIIUHAX4GJO3J7HFGLBCNF6ZCZSTLJE7EKO5IUHGLQLMXZO,GBUUWQDVEEXBJCUF5UL24YGXKJIP5EMM7KFWIAR33KQRJR34GN6HEDPV,GBYETUYNBK2ZO5MSYBJKSLDEA2ZHIXLCFL3MMWU6RHFVAUBKEWQORYKS`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 || len(args) > 2 { - return cmd.Usage() - } - - path := args[0] - log.Infof("Analyzing indices at %s", path) - - accounts := []string{} - if len(args) == 2 { - accounts = strings.Split(args[1], ",") - } - - limit, err := cmd.Flags().GetUint("limit") - if err != nil { - return cmd.Usage() - } - - if len(accounts) > 0 { - indexName, err := cmd.Flags().GetString("index-name") - if err != nil { - return cmd.Usage() - } - - for _, account := range accounts { - if !strkey.IsValidEd25519PublicKey(account) && - !strkey.IsValidMuxedAccountEd25519PublicKey(account) { - log.Errorf("Invalid account ID: '%s'", account) - continue - } - - getIndex(path, account, indexName, limit) - } - } else { - showAccounts(path, limit) - } - - return nil - }, - } - - purge := &cobra.Command{ - Use: "purge ", - Long: "Purges all indices for the given ledger range.", - Example: `purge s3://indices 10000 10005`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 3 { - return cmd.Usage() - } - - path := args[0] - start, err := strconv.ParseUint(args[1], 10, 32) - if err != nil { - return cmd.Usage() - } - end, err := strconv.ParseUint(args[2], 10, 32) - if err != nil { - return cmd.Usage() - } - - r := historyarchive.Range{Low: uint32(start), High: uint32(end)} - log.Infof("Purging all indices from %s for ledger range: [%d, %d].", - path, r.Low, r.High) - - return purgeIndex(path, r) - }, - } - - view.Flags().Uint("limit", 10, "a maximum number of accounts or checkpoints to show") - 
view.Flags().String("index-name", "", "filter for a particular index") - cmd.AddCommand(stats, view, purge) - - if parent == nil { - return cmd - } - parent.AddCommand(cmd) - return parent -} - -func getIndex(path, account, indexName string, limit uint) map[uint32][]string { - freq := checkpointMgr.GetCheckpointFrequency() - - store, err := index.Connect(path) - if err != nil { - log.Fatalf("Failed to connect to index store at %s: %v", path, err) - return nil - } - - indices, err := store.Read(account) - if err != nil { - log.Fatalf("Failed to read indices for %s from index store at %s: %v", - account, path, err) - return nil - } - - // It's better to summarize activity and then group it by index rather than - // just show activity in each index, because there's likely a ton of overlap - // across indices. - activity := map[uint32][]string{} - indexNames := []string{} - - for name, idx := range indices { - log.Infof("Index found: '%s'", name) - if indexName != "" && name != indexName { - continue - } - - indexNames = append(indexNames, name) - - checkpoint, err := idx.NextActiveBit(0) - for err != io.EOF { - activity[checkpoint] = append(activity[checkpoint], name) - checkpoint, err = idx.NextActiveBit(checkpoint + 1) - - if limit > 0 && limit <= uint(len(activity)) { - break - } - } - } - - log.WithField("account", account).WithField("limit", limit). - Infof("Activity for account:") - - for checkpoint, names := range activity { - first := (checkpoint - 1) * freq - last := first + freq - - nameStr := strings.Join(names, ", ") - log.WithField("indices", nameStr). - Infof(" - checkpoint %d, ledgers [%d, %d)", checkpoint, first, last) - } - - log.Infof("Summary: %d active checkpoints, %d possible active ledgers", - len(activity), len(activity)*int(freq)) - log.Infof("Checkpoint range: [%d, %d]", - ordered.MinSlice(maps.Keys(activity)), - ordered.MaxSlice(maps.Keys(activity))) - log.Infof("All discovered indices: %s", strings.Join(indexNames, ", ")) - - return activity -} - -func showAccounts(path string, limit uint) []string { - store, err := index.Connect(path) - if err != nil { - log.Fatalf("Failed to connect to index store at %s: %v", path, err) - return nil - } - - accounts, err := store.ReadAccounts() - if err != nil { - log.Fatalf("Failed read accounts from index store at %s: %v", path, err) - return nil - } - - if limit == 0 { - limit = uint(len(accounts)) - } - - for i := uint(0); i < limit; i++ { - log.Info(accounts[i]) - } - - return accounts -} - -func purgeIndex(path string, r historyarchive.Range) error { - freq := historyarchive.DefaultCheckpointFrequency - store, err := index.Connect(path) - if err != nil { - log.Fatalf("Failed to connect to index store at %s: %v", path, err) - return err - } - - accounts, err := store.ReadAccounts() - if err != nil { - log.Fatalf("Failed read accounts: %v", err) - return err - } - - purged := 0 - for _, account := range accounts { - L := log.WithField("account", account) - - indices, err := store.Read(account) - if err != nil { - L.Errorf("Failed to read indices: %v", err) - continue - } - - for name, index := range indices { - var err error - active := uint32(0) - for err == nil { - if active*freq < r.Low { // too low, skip ahead - active, err = index.NextActiveBit(active + 1) - continue - } else if active*freq > r.High { // too high, we're done - break - } - - L.WithField("index", name). 
- Debugf("Purged checkpoint %d (ledgers %d through %d).", - active, active*freq, (active+1)*freq-1) - - purged++ - - index.SetInactive(active) - active, err = index.NextActiveBit(active) - } - - if err != nil && err != io.EOF { - L.WithField("index", name). - Errorf("Iterating over index failed: %v", err) - continue - } - - } - - store.AddParticipantToIndexesNoBackend(account, indices) - if err := store.Flush(); err != nil { - log.WithField("account", account). - Errorf("Flushing index failed: %v", err) - continue - } - } - - log.Infof("Purged %d values across %d accounts from all indices at %s.", - purged, len(accounts), path) - return nil -} diff --git a/exp/lighthorizon/tools/index_test.go b/exp/lighthorizon/tools/index_test.go deleted file mode 100644 index 6d42f88f30..0000000000 --- a/exp/lighthorizon/tools/index_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package tools - -import ( - "path/filepath" - "testing" - - "github.com/stellar/go/exp/lighthorizon/index" - "github.com/stellar/go/historyarchive" - "github.com/stellar/go/keypair" - "github.com/stellar/go/support/log" - "github.com/stretchr/testify/require" -) - -const ( - freq = historyarchive.DefaultCheckpointFrequency -) - -func TestIndexPurge(t *testing.T) { - log.SetLevel(log.DebugLevel) - - tempFile := "file://" + filepath.Join(t.TempDir(), "index-store") - accounts := []string{keypair.MustRandom().Address()} - - idx, err := index.Connect(tempFile) - require.NoError(t, err) - - for _, chk := range []uint32{14, 15, 16, 17, 20, 25, 123} { - require.NoError(t, idx.AddParticipantsToIndexes(chk, "test", accounts)) - } - - idx.Flush() // saves to disk - - // Try purging the index - err = purgeIndex(tempFile, historyarchive.Range{Low: 15 * freq, High: 22 * freq}) - require.NoError(t, err) - - // Check to make sure it worked. - idx, err = index.Connect(tempFile) - require.NoError(t, err) - - // Ensure that the index is in the expected state. - indices, err := idx.Read(accounts[0]) - require.NoError(t, err) - require.Contains(t, indices, "test") - - index := indices["test"] - i, err := index.NextActiveBit(0) - require.NoError(t, err) - require.EqualValues(t, 14, i) - - i, err = index.NextActiveBit(15) - require.NoError(t, err) - require.EqualValues(t, 25, i) - - i, err = index.NextActiveBit(i + 1) - require.NoError(t, err) - require.EqualValues(t, 123, i) -} diff --git a/gxdr/xdr_generated.go b/gxdr/xdr_generated.go index 7265cb5a71..0b41834ce9 100644 --- a/gxdr/xdr_generated.go +++ b/gxdr/xdr_generated.go @@ -1,4 +1,4 @@ -// Code generated by goxdr -p gxdr -enum-comments -o gxdr/xdr_generated.go xdr/Stellar-SCP.x xdr/Stellar-ledger-entries.x xdr/Stellar-ledger.x xdr/Stellar-overlay.x xdr/Stellar-transaction.x xdr/Stellar-types.x xdr/Stellar-contract-env-meta.x xdr/Stellar-contract-meta.x xdr/Stellar-contract-spec.x xdr/Stellar-contract.x xdr/Stellar-internal.x xdr/Stellar-contract-config-setting.x xdr/Stellar-lighthorizon.x xdr/Stellar-exporter.x; DO NOT EDIT. +// Code generated by goxdr -p gxdr -enum-comments -o gxdr/xdr_generated.go xdr/Stellar-SCP.x xdr/Stellar-ledger-entries.x xdr/Stellar-ledger.x xdr/Stellar-overlay.x xdr/Stellar-transaction.x xdr/Stellar-types.x xdr/Stellar-contract-env-meta.x xdr/Stellar-contract-meta.x xdr/Stellar-contract-spec.x xdr/Stellar-contract.x xdr/Stellar-internal.x xdr/Stellar-contract-config-setting.x xdr/Stellar-exporter.x; DO NOT EDIT. 
package gxdr @@ -4622,37 +4622,6 @@ type ConfigSettingEntry struct { _u interface{} } -type BitmapIndex struct { - FirstBit Uint32 - LastBit Uint32 - Bitmap Value -} - -type TrieIndex struct { - // goxdr gives an error if we simply use "version" as an identifier - Version_ Uint32 - Root TrieNode -} - -type TrieNodeChild struct { - Key [1]byte - Node TrieNode -} - -type TrieNode struct { - Prefix Value - Value Value - Children []TrieNodeChild -} - -type SerializedLedgerCloseMeta struct { - // The union discriminant V selects among the following arms: - // 0: - // V0() *LedgerCloseMeta - V int32 - _u interface{} -} - // Batch of ledgers along with their transaction metadata type LedgerCloseMetaBatch struct { // starting ledger sequence number in the batch @@ -30202,211 +30171,6 @@ func (u *ConfigSettingEntry) XdrRecurse(x XDR, name string) { } func XDR_ConfigSettingEntry(v *ConfigSettingEntry) *ConfigSettingEntry { return v } -type XdrType_BitmapIndex = *BitmapIndex - -func (v *BitmapIndex) XdrPointer() interface{} { return v } -func (BitmapIndex) XdrTypeName() string { return "BitmapIndex" } -func (v BitmapIndex) XdrValue() interface{} { return v } -func (v *BitmapIndex) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } -func (v *BitmapIndex) XdrRecurse(x XDR, name string) { - if name != "" { - name = x.Sprintf("%s.", name) - } - x.Marshal(x.Sprintf("%sfirstBit", name), XDR_Uint32(&v.FirstBit)) - x.Marshal(x.Sprintf("%slastBit", name), XDR_Uint32(&v.LastBit)) - x.Marshal(x.Sprintf("%sbitmap", name), XDR_Value(&v.Bitmap)) -} -func XDR_BitmapIndex(v *BitmapIndex) *BitmapIndex { return v } - -type XdrType_TrieIndex = *TrieIndex - -func (v *TrieIndex) XdrPointer() interface{} { return v } -func (TrieIndex) XdrTypeName() string { return "TrieIndex" } -func (v TrieIndex) XdrValue() interface{} { return v } -func (v *TrieIndex) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } -func (v *TrieIndex) XdrRecurse(x XDR, name string) { - if name != "" { - name = x.Sprintf("%s.", name) - } - x.Marshal(x.Sprintf("%sversion_", name), XDR_Uint32(&v.Version_)) - x.Marshal(x.Sprintf("%sroot", name), XDR_TrieNode(&v.Root)) -} -func XDR_TrieIndex(v *TrieIndex) *TrieIndex { return v } - -type _XdrArray_1_opaque [1]byte - -func (v *_XdrArray_1_opaque) GetByteSlice() []byte { return v[:] } -func (v *_XdrArray_1_opaque) XdrTypeName() string { return "opaque[]" } -func (v *_XdrArray_1_opaque) XdrValue() interface{} { return v[:] } -func (v *_XdrArray_1_opaque) XdrPointer() interface{} { return (*[1]byte)(v) } -func (v *_XdrArray_1_opaque) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } -func (v *_XdrArray_1_opaque) String() string { return fmt.Sprintf("%x", v[:]) } -func (v *_XdrArray_1_opaque) Scan(ss fmt.ScanState, c rune) error { - return XdrArrayOpaqueScan(v[:], ss, c) -} -func (_XdrArray_1_opaque) XdrArraySize() uint32 { - const bound uint32 = 1 // Force error if not const or doesn't fit - return bound -} - -type XdrType_TrieNodeChild = *TrieNodeChild - -func (v *TrieNodeChild) XdrPointer() interface{} { return v } -func (TrieNodeChild) XdrTypeName() string { return "TrieNodeChild" } -func (v TrieNodeChild) XdrValue() interface{} { return v } -func (v *TrieNodeChild) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } -func (v *TrieNodeChild) XdrRecurse(x XDR, name string) { - if name != "" { - name = x.Sprintf("%s.", name) - } - x.Marshal(x.Sprintf("%skey", name), (*_XdrArray_1_opaque)(&v.Key)) - x.Marshal(x.Sprintf("%snode", name), XDR_TrieNode(&v.Node)) -} -func XDR_TrieNodeChild(v 
@@ -30202,211 +30171,6 @@ func (u *ConfigSettingEntry) XdrRecurse(x XDR, name string) {
 }
 func XDR_ConfigSettingEntry(v *ConfigSettingEntry) *ConfigSettingEntry { return v }
 
-type XdrType_BitmapIndex = *BitmapIndex
-
-func (v *BitmapIndex) XdrPointer() interface{} { return v }
-func (BitmapIndex) XdrTypeName() string { return "BitmapIndex" }
-func (v BitmapIndex) XdrValue() interface{} { return v }
-func (v *BitmapIndex) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-func (v *BitmapIndex) XdrRecurse(x XDR, name string) {
-	if name != "" {
-		name = x.Sprintf("%s.", name)
-	}
-	x.Marshal(x.Sprintf("%sfirstBit", name), XDR_Uint32(&v.FirstBit))
-	x.Marshal(x.Sprintf("%slastBit", name), XDR_Uint32(&v.LastBit))
-	x.Marshal(x.Sprintf("%sbitmap", name), XDR_Value(&v.Bitmap))
-}
-func XDR_BitmapIndex(v *BitmapIndex) *BitmapIndex { return v }
-
-type XdrType_TrieIndex = *TrieIndex
-
-func (v *TrieIndex) XdrPointer() interface{} { return v }
-func (TrieIndex) XdrTypeName() string { return "TrieIndex" }
-func (v TrieIndex) XdrValue() interface{} { return v }
-func (v *TrieIndex) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-func (v *TrieIndex) XdrRecurse(x XDR, name string) {
-	if name != "" {
-		name = x.Sprintf("%s.", name)
-	}
-	x.Marshal(x.Sprintf("%sversion_", name), XDR_Uint32(&v.Version_))
-	x.Marshal(x.Sprintf("%sroot", name), XDR_TrieNode(&v.Root))
-}
-func XDR_TrieIndex(v *TrieIndex) *TrieIndex { return v }
-
-type _XdrArray_1_opaque [1]byte
-
-func (v *_XdrArray_1_opaque) GetByteSlice() []byte { return v[:] }
-func (v *_XdrArray_1_opaque) XdrTypeName() string { return "opaque[]" }
-func (v *_XdrArray_1_opaque) XdrValue() interface{} { return v[:] }
-func (v *_XdrArray_1_opaque) XdrPointer() interface{} { return (*[1]byte)(v) }
-func (v *_XdrArray_1_opaque) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-func (v *_XdrArray_1_opaque) String() string { return fmt.Sprintf("%x", v[:]) }
-func (v *_XdrArray_1_opaque) Scan(ss fmt.ScanState, c rune) error {
-	return XdrArrayOpaqueScan(v[:], ss, c)
-}
-func (_XdrArray_1_opaque) XdrArraySize() uint32 {
-	const bound uint32 = 1 // Force error if not const or doesn't fit
-	return bound
-}
-
-type XdrType_TrieNodeChild = *TrieNodeChild
-
-func (v *TrieNodeChild) XdrPointer() interface{} { return v }
-func (TrieNodeChild) XdrTypeName() string { return "TrieNodeChild" }
-func (v TrieNodeChild) XdrValue() interface{} { return v }
-func (v *TrieNodeChild) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-func (v *TrieNodeChild) XdrRecurse(x XDR, name string) {
-	if name != "" {
-		name = x.Sprintf("%s.", name)
-	}
-	x.Marshal(x.Sprintf("%skey", name), (*_XdrArray_1_opaque)(&v.Key))
-	x.Marshal(x.Sprintf("%snode", name), XDR_TrieNode(&v.Node))
-}
-func XDR_TrieNodeChild(v *TrieNodeChild) *TrieNodeChild { return v }
-
-type _XdrVec_unbounded_TrieNodeChild []TrieNodeChild
-
-func (_XdrVec_unbounded_TrieNodeChild) XdrBound() uint32 {
-	const bound uint32 = 4294967295 // Force error if not const or doesn't fit
-	return bound
-}
-func (_XdrVec_unbounded_TrieNodeChild) XdrCheckLen(length uint32) {
-	if length > uint32(4294967295) {
-		XdrPanic("_XdrVec_unbounded_TrieNodeChild length %d exceeds bound 4294967295", length)
-	} else if int(length) < 0 {
-		XdrPanic("_XdrVec_unbounded_TrieNodeChild length %d exceeds max int", length)
-	}
-}
-func (v _XdrVec_unbounded_TrieNodeChild) GetVecLen() uint32 { return uint32(len(v)) }
-func (v *_XdrVec_unbounded_TrieNodeChild) SetVecLen(length uint32) {
-	v.XdrCheckLen(length)
-	if int(length) <= cap(*v) {
-		if int(length) != len(*v) {
-			*v = (*v)[:int(length)]
-		}
-		return
-	}
-	newcap := 2 * cap(*v)
-	if newcap < int(length) { // also catches overflow where 2*cap < 0
-		newcap = int(length)
-	} else if bound := uint(4294967295); uint(newcap) > bound {
-		if int(bound) < 0 {
-			bound = ^uint(0) >> 1
-		}
-		newcap = int(bound)
-	}
-	nv := make([]TrieNodeChild, int(length), newcap)
-	copy(nv, *v)
-	*v = nv
-}
-func (v *_XdrVec_unbounded_TrieNodeChild) XdrMarshalN(x XDR, name string, n uint32) {
-	v.XdrCheckLen(n)
-	for i := 0; i < int(n); i++ {
-		if i >= len(*v) {
-			v.SetVecLen(uint32(i + 1))
-		}
-		XDR_TrieNodeChild(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i))
-	}
-	if int(n) < len(*v) {
-		*v = (*v)[:int(n)]
-	}
-}
-func (v *_XdrVec_unbounded_TrieNodeChild) XdrRecurse(x XDR, name string) {
-	size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295}
-	x.Marshal(name, &size)
-	v.XdrMarshalN(x, name, size.Size)
-}
-func (_XdrVec_unbounded_TrieNodeChild) XdrTypeName() string { return "TrieNodeChild<>" }
-func (v *_XdrVec_unbounded_TrieNodeChild) XdrPointer() interface{} { return (*[]TrieNodeChild)(v) }
-func (v _XdrVec_unbounded_TrieNodeChild) XdrValue() interface{} { return ([]TrieNodeChild)(v) }
-func (v *_XdrVec_unbounded_TrieNodeChild) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-
-type XdrType_TrieNode = *TrieNode
-
-func (v *TrieNode) XdrPointer() interface{} { return v }
-func (TrieNode) XdrTypeName() string { return "TrieNode" }
-func (v TrieNode) XdrValue() interface{} { return v }
-func (v *TrieNode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-func (v *TrieNode) XdrRecurse(x XDR, name string) {
-	if name != "" {
-		name = x.Sprintf("%s.", name)
-	}
-	x.Marshal(x.Sprintf("%sprefix", name), XDR_Value(&v.Prefix))
-	x.Marshal(x.Sprintf("%svalue", name), XDR_Value(&v.Value))
-	x.Marshal(x.Sprintf("%schildren", name), (*_XdrVec_unbounded_TrieNodeChild)(&v.Children))
-}
-func XDR_TrieNode(v *TrieNode) *TrieNode { return v }
-
-var _XdrTags_SerializedLedgerCloseMeta = map[int32]bool{
-	XdrToI32(0): true,
-}
-
-func (_ SerializedLedgerCloseMeta) XdrValidTags() map[int32]bool {
-	return _XdrTags_SerializedLedgerCloseMeta
-}
-func (u *SerializedLedgerCloseMeta) V0() *LedgerCloseMeta {
-	switch u.V {
-	case 0:
-		if v, ok := u._u.(*LedgerCloseMeta); ok {
-			return v
-		} else {
-			var zero LedgerCloseMeta
-			u._u = &zero
-			return &zero
-		}
-	default:
-		XdrPanic("SerializedLedgerCloseMeta.V0 accessed when V == %v", u.V)
-		return nil
-	}
-}
-func (u SerializedLedgerCloseMeta) XdrValid() bool {
-	switch u.V {
-	case 0:
-		return true
-	}
-	return false
-}
-func (u *SerializedLedgerCloseMeta) XdrUnionTag() XdrNum32 {
-	return XDR_int32(&u.V)
-}
-func (u *SerializedLedgerCloseMeta) XdrUnionTagName() string {
-	return "V"
-}
-func (u *SerializedLedgerCloseMeta) XdrUnionBody() XdrType {
-	switch u.V {
-	case 0:
-		return XDR_LedgerCloseMeta(u.V0())
-	}
-	return nil
-}
-func (u *SerializedLedgerCloseMeta) XdrUnionBodyName() string {
-	switch u.V {
-	case 0:
-		return "V0"
-	}
-	return ""
-}
-
-type XdrType_SerializedLedgerCloseMeta = *SerializedLedgerCloseMeta
-
-func (v *SerializedLedgerCloseMeta) XdrPointer() interface{} { return v }
-func (SerializedLedgerCloseMeta) XdrTypeName() string { return "SerializedLedgerCloseMeta" }
-func (v SerializedLedgerCloseMeta) XdrValue() interface{} { return v }
-func (v *SerializedLedgerCloseMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) }
-func (u *SerializedLedgerCloseMeta) XdrRecurse(x XDR, name string) {
-	if name != "" {
-		name = x.Sprintf("%s.", name)
-	}
-	XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name))
-	switch u.V {
-	case 0:
-		x.Marshal(x.Sprintf("%sv0", name), XDR_LedgerCloseMeta(u.V0()))
-		return
-	}
-	XdrPanic("invalid V (%v) in SerializedLedgerCloseMeta", u.V)
-}
-func XDR_SerializedLedgerCloseMeta(v *SerializedLedgerCloseMeta) *SerializedLedgerCloseMeta { return v }
-
 type _XdrVec_unbounded_LedgerCloseMeta []LedgerCloseMeta
 
 func (_XdrVec_unbounded_LedgerCloseMeta) XdrBound() uint32 {
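One detail of the deleted vector helpers worth recording: SetVecLen grows capacity by doubling and clamps at the XDR bound, so XdrMarshalN can extend the slice element-by-element without quadratic copying. A standalone restatement of that growth rule (a sketch, not repository code):

package main

import "fmt"

// grow reports the capacity a doubling-with-clamp strategy would allocate,
// mirroring the SetVecLen logic deleted above; bound plays the role of the
// XDR vector bound (4294967295 for unbounded vectors).
func grow(curCap, need, bound int) int {
	if need <= curCap {
		return curCap
	}
	newcap := 2 * curCap
	if newcap < need { // also covers the fresh/empty slice case
		newcap = need
	} else if newcap > bound {
		newcap = bound
	}
	return newcap
}

func main() {
	fmt.Println(grow(0, 1, 100))   // 1
	fmt.Println(grow(4, 5, 100))   // 8
	fmt.Println(grow(64, 65, 100)) // 100 (clamped at the bound)
}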
diff --git a/ingest/ledgerbackend/history_archive_backend.go b/ingest/ledgerbackend/history_archive_backend.go
deleted file mode 100644
index 331f43032d..0000000000
--- a/ingest/ledgerbackend/history_archive_backend.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package ledgerbackend
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/stellar/go/metaarchive"
-	"github.com/stellar/go/xdr"
-)
-
-type HistoryArchiveBackend struct {
-	metaArchive metaarchive.MetaArchive
-}
-
-func NewHistoryArchiveBackend(metaArchive metaarchive.MetaArchive) *HistoryArchiveBackend {
-	return &HistoryArchiveBackend{
-		metaArchive: metaArchive,
-	}
-}
-
-func (b *HistoryArchiveBackend) GetLatestLedgerSequence(ctx context.Context) (uint32, error) {
-	return b.metaArchive.GetLatestLedgerSequence(ctx)
-}
-
-func (b *HistoryArchiveBackend) PrepareRange(ctx context.Context, ledgerRange Range) error {
-	// Noop
-	return nil
-}
-
-func (b *HistoryArchiveBackend) IsPrepared(ctx context.Context, ledgerRange Range) (bool, error) {
-	// Noop
-	return true, nil
-}
-
-func (b *HistoryArchiveBackend) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) {
-	serializedLedger, err := b.metaArchive.GetLedger(ctx, sequence)
-	if err != nil {
-		return xdr.LedgerCloseMeta{}, err
-	}
-
-	output, isV0 := serializedLedger.GetV0()
-	if !isV0 {
-		return xdr.LedgerCloseMeta{}, fmt.Errorf("unexpected serialized ledger version number (0x%x)", serializedLedger.V)
-	}
-	return output, nil
-}
-
-func (b *HistoryArchiveBackend) Close() error {
-	// Noop
-	return nil
-}
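HistoryArchiveBackend satisfied the ledgerbackend.LedgerBackend interface, so callers drove it with the usual prepare-then-read loop even though PrepareRange and IsPrepared were no-ops (every ledger lived as its own object in the archive). A minimal sketch of the consumption pattern this deletion retires; the wiring assumes the metaarchive constructor shown in the next file:

package main

import (
	"context"
	"log"

	"github.com/stellar/go/ingest/ledgerbackend"
	"github.com/stellar/go/metaarchive"
	"github.com/stellar/go/support/storage"
)

func main() {
	ctx := context.Background()

	// Hypothetical storage handle; any support/storage.Storage works
	// (nil here for brevity, supply a real bucket in practice).
	var store storage.Storage

	backend := ledgerbackend.NewHistoryArchiveBackend(metaarchive.NewMetaArchive(store))
	defer backend.Close()

	latest, err := backend.GetLatestLedgerSequence(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// PrepareRange is a no-op for this backend, but callers invoked it
	// anyway to stay generic across ledger backends.
	if err := backend.PrepareRange(ctx, ledgerbackend.BoundedRange(2, latest)); err != nil {
		log.Fatal(err)
	}
	for seq := uint32(2); seq <= latest; seq++ {
		meta, err := backend.GetLedger(ctx, seq)
		if err != nil {
			log.Fatal(err)
		}
		_ = meta // feed the xdr.LedgerCloseMeta to an ingestion pipeline
	}
}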
diff --git a/metaarchive/main.go b/metaarchive/main.go
deleted file mode 100644
index 7d06a46f9a..0000000000
--- a/metaarchive/main.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package metaarchive
-
-import (
-	"bytes"
-	"context"
-	"io"
-	"os"
-	"strconv"
-
-	"github.com/stellar/go/support/errors"
-	"github.com/stellar/go/support/storage"
-	"github.com/stellar/go/xdr"
-)
-
-type MetaArchive interface {
-	GetLatestLedgerSequence(ctx context.Context) (uint32, error)
-	GetLedger(ctx context.Context, sequence uint32) (xdr.SerializedLedgerCloseMeta, error)
-}
-
-type metaArchive struct {
-	s storage.Storage
-}
-
-func NewMetaArchive(b storage.Storage) MetaArchive {
-	return &metaArchive{s: b}
-}
-
-func (m *metaArchive) GetLatestLedgerSequence(ctx context.Context) (uint32, error) {
-	r, err := m.s.GetFile("latest")
-	if os.IsNotExist(err) {
-		return 2, nil
-	} else if err != nil {
-		return 0, errors.Wrap(err, "could not open latest ledger bucket")
-	}
-	defer r.Close()
-	var buf bytes.Buffer
-	if _, err = io.Copy(&buf, r); err != nil {
-		return 0, errors.Wrap(err, "could not read latest ledger")
-	}
-	parsed, err := strconv.ParseUint(buf.String(), 10, 32)
-	if err != nil {
-		return 0, errors.Wrapf(err, "could not parse latest ledger: %q", buf.String())
-	}
-	return uint32(parsed), nil
-}
-
-func (m *metaArchive) GetLedger(ctx context.Context, sequence uint32) (xdr.SerializedLedgerCloseMeta, error) {
-	var ledger xdr.SerializedLedgerCloseMeta
-	r, err := m.s.GetFile("ledgers/" + strconv.FormatUint(uint64(sequence), 10))
-	if err != nil {
-		return xdr.SerializedLedgerCloseMeta{}, err
-	}
-	defer r.Close()
-	var buf bytes.Buffer
-	if _, err = io.Copy(&buf, r); err != nil {
-		return xdr.SerializedLedgerCloseMeta{}, err
-	}
-	if err = ledger.UnmarshalBinary(buf.Bytes()); err != nil {
-		return xdr.SerializedLedgerCloseMeta{}, err
-	}
-	return ledger, nil
-}
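The storage layout the deleted archive expected is implicit in its two getters: a text object named "latest" holding a decimal sequence number (when the marker is absent, the reader defaults to sequence 2, the first ledger after genesis), and one XDR blob per ledger under ledgers/<sequence>. A hedged sketch of the corresponding writer; no such producer exists in this repository, and the PutFile signature is an assumption:

package archivewriter

import (
	"bytes"
	"io"
	"strconv"

	"github.com/stellar/go/support/storage"
	"github.com/stellar/go/xdr"
)

// putLedger writes one ledger in the layout the deleted GetLedger read:
// raw XDR at "ledgers/<seq>" plus an updated "latest" marker. It assumes
// storage.Storage exposes PutFile(path string, in io.ReadCloser) error,
// which is an assumption here, not a documented contract.
func putLedger(s storage.Storage, seq uint32, meta xdr.SerializedLedgerCloseMeta) error {
	raw, err := meta.MarshalBinary()
	if err != nil {
		return err
	}
	key := "ledgers/" + strconv.FormatUint(uint64(seq), 10)
	if err := s.PutFile(key, io.NopCloser(bytes.NewReader(raw))); err != nil {
		return err
	}
	latest := []byte(strconv.FormatUint(uint64(seq), 10))
	return s.PutFile("latest", io.NopCloser(bytes.NewReader(latest)))
}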
diff --git a/xdr/xdr_generated.go b/xdr/xdr_generated.go
index c47ab760c9..fb7fb7471e 100644
--- a/xdr/xdr_generated.go
+++ b/xdr/xdr_generated.go
@@ -13,7 +13,6 @@
 //  xdr/Stellar-internal.x
 //  xdr/Stellar-ledger-entries.x
 //  xdr/Stellar-ledger.x
-//  xdr/Stellar-lighthorizon.x
 //  xdr/Stellar-overlay.x
 //  xdr/Stellar-transaction.x
 //  xdr/Stellar-types.x
@@ -43,7 +42,6 @@ var XdrFilesSHA256 = map[string]string{
 	"xdr/Stellar-internal.x": "227835866c1b2122d1eaf28839ba85ea7289d1cb681dda4ca619c2da3d71fe00",
 	"xdr/Stellar-ledger-entries.x": "77dc7062ae6d0812136333e12e35b2294d7c2896a536be9c811eb0ed2abbbccb",
 	"xdr/Stellar-ledger.x": "46c1c55972750b97650ff00788a2be4764975b787ef51c8fa931c56e2028a3c4",
-	"xdr/Stellar-lighthorizon.x": "1aac09eaeda224154f653a0c95f02167be0c110fc295bb41b756a080eb8c06df",
 	"xdr/Stellar-overlay.x": "8c73b7c3ad974e7fc4aa4fdf34f7ad50053406254efbd7406c96657cf41691d3",
 	"xdr/Stellar-transaction.x": "0d2b35a331a540b48643925d0869857236eb2487c02d340ea32e365e784ea2b8",
 	"xdr/Stellar-types.x": "6e3b13f0d3e360b09fa5e2b0e55d43f4d974a769df66afb34e8aecbb329d3f15",
@@ -58962,482 +58960,6 @@ func (s ConfigSettingEntry) xdrType() {}
 
 var _ xdrType = (*ConfigSettingEntry)(nil)
 
-// BitmapIndex is an XDR Struct defines as:
-//
-//	struct BitmapIndex {
-//	    uint32 firstBit;
-//	    uint32 lastBit;
-//	    Value bitmap;
-//	};
-type BitmapIndex struct {
-	FirstBit Uint32
-	LastBit  Uint32
-	Bitmap   Value
-}
-
-// EncodeTo encodes this value using the Encoder.
-func (s *BitmapIndex) EncodeTo(e *xdr.Encoder) error {
-	var err error
-	if err = s.FirstBit.EncodeTo(e); err != nil {
-		return err
-	}
-	if err = s.LastBit.EncodeTo(e); err != nil {
-		return err
-	}
-	if err = s.Bitmap.EncodeTo(e); err != nil {
-		return err
-	}
-	return nil
-}
-
-var _ decoderFrom = (*BitmapIndex)(nil)
-
-// DecodeFrom decodes this value using the Decoder.
-func (s *BitmapIndex) DecodeFrom(d *xdr.Decoder, maxDepth uint) (int, error) {
-	if maxDepth == 0 {
-		return 0, fmt.Errorf("decoding BitmapIndex: %w", ErrMaxDecodingDepthReached)
-	}
-	maxDepth -= 1
-	var err error
-	var n, nTmp int
-	nTmp, err = s.FirstBit.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Uint32: %w", err)
-	}
-	nTmp, err = s.LastBit.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Uint32: %w", err)
-	}
-	nTmp, err = s.Bitmap.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Value: %w", err)
-	}
-	return n, nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (s BitmapIndex) MarshalBinary() ([]byte, error) {
-	b := bytes.Buffer{}
-	e := xdr.NewEncoder(&b)
-	err := s.EncodeTo(e)
-	return b.Bytes(), err
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (s *BitmapIndex) UnmarshalBinary(inp []byte) error {
-	r := bytes.NewReader(inp)
-	o := xdr.DefaultDecodeOptions
-	o.MaxInputLen = len(inp)
-	d := xdr.NewDecoderWithOptions(r, o)
-	_, err := s.DecodeFrom(d, o.MaxDepth)
-	return err
-}
-
-var (
-	_ encoding.BinaryMarshaler   = (*BitmapIndex)(nil)
-	_ encoding.BinaryUnmarshaler = (*BitmapIndex)(nil)
-)
-
-// xdrType signals that this type represents XDR values defined by this package.
-func (s BitmapIndex) xdrType() {}
-
-var _ xdrType = (*BitmapIndex)(nil)
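Like every generated type, BitmapIndex round-tripped through the encoding.BinaryMarshaler pair shown above; FirstBit and LastBit delimit the checkpoint range that the opaque bitmap covers. A minimal round-trip sketch using the removed type:

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	// FirstBit/LastBit bound the checkpoint range; the Value arm holds
	// the packed bits themselves (contents here are arbitrary).
	in := xdr.BitmapIndex{
		FirstBit: 14,
		LastBit:  123,
		Bitmap:   xdr.Value{0x80, 0x01},
	}
	raw, err := in.MarshalBinary()
	if err != nil {
		panic(err)
	}
	var out xdr.BitmapIndex
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped bits %d..%d over %d byte(s) of bitmap\n",
		out.FirstBit, out.LastBit, len(out.Bitmap))
}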
-
-// TrieIndex is an XDR Struct defines as:
-//
-//	struct TrieIndex {
-//	    uint32 version_; // goxdr gives an error if we simply use "version" as an identifier
-//	    TrieNode root;
-//	};
-type TrieIndex struct {
-	Version Uint32
-	Root    TrieNode
-}
-
-// EncodeTo encodes this value using the Encoder.
-func (s *TrieIndex) EncodeTo(e *xdr.Encoder) error {
-	var err error
-	if err = s.Version.EncodeTo(e); err != nil {
-		return err
-	}
-	if err = s.Root.EncodeTo(e); err != nil {
-		return err
-	}
-	return nil
-}
-
-var _ decoderFrom = (*TrieIndex)(nil)
-
-// DecodeFrom decodes this value using the Decoder.
-func (s *TrieIndex) DecodeFrom(d *xdr.Decoder, maxDepth uint) (int, error) {
-	if maxDepth == 0 {
-		return 0, fmt.Errorf("decoding TrieIndex: %w", ErrMaxDecodingDepthReached)
-	}
-	maxDepth -= 1
-	var err error
-	var n, nTmp int
-	nTmp, err = s.Version.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Uint32: %w", err)
-	}
-	nTmp, err = s.Root.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding TrieNode: %w", err)
-	}
-	return n, nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (s TrieIndex) MarshalBinary() ([]byte, error) {
-	b := bytes.Buffer{}
-	e := xdr.NewEncoder(&b)
-	err := s.EncodeTo(e)
-	return b.Bytes(), err
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (s *TrieIndex) UnmarshalBinary(inp []byte) error {
-	r := bytes.NewReader(inp)
-	o := xdr.DefaultDecodeOptions
-	o.MaxInputLen = len(inp)
-	d := xdr.NewDecoderWithOptions(r, o)
-	_, err := s.DecodeFrom(d, o.MaxDepth)
-	return err
-}
-
-var (
-	_ encoding.BinaryMarshaler   = (*TrieIndex)(nil)
-	_ encoding.BinaryUnmarshaler = (*TrieIndex)(nil)
-)
-
-// xdrType signals that this type represents XDR values defined by this package.
-func (s TrieIndex) xdrType() {}
-
-var _ xdrType = (*TrieIndex)(nil)
-
-// TrieNodeChild is an XDR Struct defines as:
-//
-//	struct TrieNodeChild {
-//	    opaque key[1];
-//	    TrieNode node;
-//	};
-type TrieNodeChild struct {
-	Key  [1]byte `xdrmaxsize:"1"`
-	Node TrieNode
-}
-
-// EncodeTo encodes this value using the Encoder.
-func (s *TrieNodeChild) EncodeTo(e *xdr.Encoder) error {
-	var err error
-	if _, err = e.EncodeFixedOpaque(s.Key[:]); err != nil {
-		return err
-	}
-	if err = s.Node.EncodeTo(e); err != nil {
-		return err
-	}
-	return nil
-}
-
-var _ decoderFrom = (*TrieNodeChild)(nil)
-
-// DecodeFrom decodes this value using the Decoder.
-func (s *TrieNodeChild) DecodeFrom(d *xdr.Decoder, maxDepth uint) (int, error) {
-	if maxDepth == 0 {
-		return 0, fmt.Errorf("decoding TrieNodeChild: %w", ErrMaxDecodingDepthReached)
-	}
-	maxDepth -= 1
-	var err error
-	var n, nTmp int
-	nTmp, err = d.DecodeFixedOpaqueInplace(s.Key[:])
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Key: %w", err)
-	}
-	nTmp, err = s.Node.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding TrieNode: %w", err)
-	}
-	return n, nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (s TrieNodeChild) MarshalBinary() ([]byte, error) {
-	b := bytes.Buffer{}
-	e := xdr.NewEncoder(&b)
-	err := s.EncodeTo(e)
-	return b.Bytes(), err
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (s *TrieNodeChild) UnmarshalBinary(inp []byte) error {
-	r := bytes.NewReader(inp)
-	o := xdr.DefaultDecodeOptions
-	o.MaxInputLen = len(inp)
-	d := xdr.NewDecoderWithOptions(r, o)
-	_, err := s.DecodeFrom(d, o.MaxDepth)
-	return err
-}
-
-var (
-	_ encoding.BinaryMarshaler   = (*TrieNodeChild)(nil)
-	_ encoding.BinaryUnmarshaler = (*TrieNodeChild)(nil)
-)
-
-// xdrType signals that this type represents XDR values defined by this package.
-func (s TrieNodeChild) xdrType() {}
-
-var _ xdrType = (*TrieNodeChild)(nil)
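TrieNode and TrieNodeChild together encode a prefix-compressed radix trie: each node carries a prefix, an optional value, and children keyed by one byte. A hedged lookup sketch over the removed types; whether the child's key byte is consumed before the child's own prefix is an assumption here, not something this diff establishes:

package trieview

import (
	"bytes"

	"github.com/stellar/go/xdr"
)

// lookup descends the removed xdr.TrieNode structure: strip the node's
// prefix, then follow the child whose one-byte key matches the next byte.
// Assumption: the key byte is consumed before the child's own prefix.
func lookup(n *xdr.TrieNode, key []byte) (xdr.Value, bool) {
	if !bytes.HasPrefix(key, n.Prefix) {
		return nil, false
	}
	key = key[len(n.Prefix):]
	if len(key) == 0 {
		return n.Value, len(n.Value) > 0
	}
	for i := range n.Children {
		if n.Children[i].Key[0] == key[0] {
			return lookup(&n.Children[i].Node, key[1:])
		}
	}
	return nil, false
}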
-
-// TrieNode is an XDR Struct defines as:
-//
-//	struct TrieNode {
-//	    Value prefix;
-//	    Value value;
-//	    TrieNodeChild children<>;
-//	};
-type TrieNode struct {
-	Prefix   Value
-	Value    Value
-	Children []TrieNodeChild
-}
-
-// EncodeTo encodes this value using the Encoder.
-func (s *TrieNode) EncodeTo(e *xdr.Encoder) error {
-	var err error
-	if err = s.Prefix.EncodeTo(e); err != nil {
-		return err
-	}
-	if err = s.Value.EncodeTo(e); err != nil {
-		return err
-	}
-	if _, err = e.EncodeUint(uint32(len(s.Children))); err != nil {
-		return err
-	}
-	for i := 0; i < len(s.Children); i++ {
-		if err = s.Children[i].EncodeTo(e); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-var _ decoderFrom = (*TrieNode)(nil)
-
-// DecodeFrom decodes this value using the Decoder.
-func (s *TrieNode) DecodeFrom(d *xdr.Decoder, maxDepth uint) (int, error) {
-	if maxDepth == 0 {
-		return 0, fmt.Errorf("decoding TrieNode: %w", ErrMaxDecodingDepthReached)
-	}
-	maxDepth -= 1
-	var err error
-	var n, nTmp int
-	nTmp, err = s.Prefix.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Value: %w", err)
-	}
-	nTmp, err = s.Value.DecodeFrom(d, maxDepth)
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Value: %w", err)
-	}
-	var l uint32
-	l, nTmp, err = d.DecodeUint()
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding TrieNodeChild: %w", err)
-	}
-	s.Children = nil
-	if l > 0 {
-		if il, ok := d.InputLen(); ok && uint(il) < uint(l) {
-			return n, fmt.Errorf("decoding TrieNodeChild: length (%d) exceeds remaining input length (%d)", l, il)
-		}
-		s.Children = make([]TrieNodeChild, l)
-		for i := uint32(0); i < l; i++ {
-			nTmp, err = s.Children[i].DecodeFrom(d, maxDepth)
-			n += nTmp
-			if err != nil {
-				return n, fmt.Errorf("decoding TrieNodeChild: %w", err)
-			}
-		}
-	}
-	return n, nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (s TrieNode) MarshalBinary() ([]byte, error) {
-	b := bytes.Buffer{}
-	e := xdr.NewEncoder(&b)
-	err := s.EncodeTo(e)
-	return b.Bytes(), err
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (s *TrieNode) UnmarshalBinary(inp []byte) error {
-	r := bytes.NewReader(inp)
-	o := xdr.DefaultDecodeOptions
-	o.MaxInputLen = len(inp)
-	d := xdr.NewDecoderWithOptions(r, o)
-	_, err := s.DecodeFrom(d, o.MaxDepth)
-	return err
-}
-
-var (
-	_ encoding.BinaryMarshaler   = (*TrieNode)(nil)
-	_ encoding.BinaryUnmarshaler = (*TrieNode)(nil)
-)
-
-// xdrType signals that this type represents XDR values defined by this package.
-func (s TrieNode) xdrType() {}
-
-var _ xdrType = (*TrieNode)(nil)
-
-// SerializedLedgerCloseMeta is an XDR Union defines as:
-//
-//	union SerializedLedgerCloseMeta switch (int v)
-//	{
-//	case 0:
-//	    LedgerCloseMeta v0;
-//	};
-type SerializedLedgerCloseMeta struct {
-	V  int32
-	V0 *LedgerCloseMeta
-}
-
-// SwitchFieldName returns the field name in which this union's
-// discriminant is stored
-func (u SerializedLedgerCloseMeta) SwitchFieldName() string {
-	return "V"
-}
-
-// ArmForSwitch returns which field name should be used for storing
-// the value for an instance of SerializedLedgerCloseMeta
-func (u SerializedLedgerCloseMeta) ArmForSwitch(sw int32) (string, bool) {
-	switch int32(sw) {
-	case 0:
-		return "V0", true
-	}
-	return "-", false
-}
-
-// NewSerializedLedgerCloseMeta creates a new SerializedLedgerCloseMeta.
-func NewSerializedLedgerCloseMeta(v int32, value interface{}) (result SerializedLedgerCloseMeta, err error) {
-	result.V = v
-	switch int32(v) {
-	case 0:
-		tv, ok := value.(LedgerCloseMeta)
-		if !ok {
-			err = errors.New("invalid value, must be LedgerCloseMeta")
-			return
-		}
-		result.V0 = &tv
-	}
-	return
-}
-
-// MustV0 retrieves the V0 value from the union,
-// panicing if the value is not set.
-func (u SerializedLedgerCloseMeta) MustV0() LedgerCloseMeta {
-	val, ok := u.GetV0()
-
-	if !ok {
-		panic("arm V0 is not set")
-	}
-
-	return val
-}
-
-// GetV0 retrieves the V0 value from the union,
-// returning ok if the union's switch indicated the value is valid.
-func (u SerializedLedgerCloseMeta) GetV0() (result LedgerCloseMeta, ok bool) {
-	armName, _ := u.ArmForSwitch(int32(u.V))
-
-	if armName == "V0" {
-		result = *u.V0
-		ok = true
-	}
-
-	return
-}
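The accessor discipline above is what the deleted HistoryArchiveBackend.GetLedger leaned on: construct via NewSerializedLedgerCloseMeta, read back via GetV0, and treat ok == false as an unknown version. A short sketch of that round trip with the removed type:

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	var meta xdr.LedgerCloseMeta // zero value is enough for illustration

	serialized, err := xdr.NewSerializedLedgerCloseMeta(0, meta)
	if err != nil {
		panic(err)
	}

	// GetV0 is the non-panicking accessor; MustV0 would panic on a
	// version mismatch instead of returning ok == false.
	if v0, ok := serialized.GetV0(); ok {
		fmt.Printf("recovered ledger close meta, version %d\n", serialized.V)
		_ = v0
	} else {
		fmt.Printf("unexpected serialized ledger version %d\n", serialized.V)
	}
}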
-
-// EncodeTo encodes this value using the Encoder.
-func (u SerializedLedgerCloseMeta) EncodeTo(e *xdr.Encoder) error {
-	var err error
-	if _, err = e.EncodeInt(int32(u.V)); err != nil {
-		return err
-	}
-	switch int32(u.V) {
-	case 0:
-		if err = (*u.V0).EncodeTo(e); err != nil {
-			return err
-		}
-		return nil
-	}
-	return fmt.Errorf("V (int32) switch value '%d' is not valid for union SerializedLedgerCloseMeta", u.V)
-}
-
-var _ decoderFrom = (*SerializedLedgerCloseMeta)(nil)
-
-// DecodeFrom decodes this value using the Decoder.
-func (u *SerializedLedgerCloseMeta) DecodeFrom(d *xdr.Decoder, maxDepth uint) (int, error) {
-	if maxDepth == 0 {
-		return 0, fmt.Errorf("decoding SerializedLedgerCloseMeta: %w", ErrMaxDecodingDepthReached)
-	}
-	maxDepth -= 1
-	var err error
-	var n, nTmp int
-	u.V, nTmp, err = d.DecodeInt()
-	n += nTmp
-	if err != nil {
-		return n, fmt.Errorf("decoding Int: %w", err)
-	}
-	switch int32(u.V) {
-	case 0:
-		u.V0 = new(LedgerCloseMeta)
-		nTmp, err = (*u.V0).DecodeFrom(d, maxDepth)
-		n += nTmp
-		if err != nil {
-			return n, fmt.Errorf("decoding LedgerCloseMeta: %w", err)
-		}
-		return n, nil
-	}
-	return n, fmt.Errorf("union SerializedLedgerCloseMeta has invalid V (int32) switch value '%d'", u.V)
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (s SerializedLedgerCloseMeta) MarshalBinary() ([]byte, error) {
-	b := bytes.Buffer{}
-	e := xdr.NewEncoder(&b)
-	err := s.EncodeTo(e)
-	return b.Bytes(), err
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (s *SerializedLedgerCloseMeta) UnmarshalBinary(inp []byte) error {
-	r := bytes.NewReader(inp)
-	o := xdr.DefaultDecodeOptions
-	o.MaxInputLen = len(inp)
-	d := xdr.NewDecoderWithOptions(r, o)
-	_, err := s.DecodeFrom(d, o.MaxDepth)
-	return err
-}
-
-var (
-	_ encoding.BinaryMarshaler   = (*SerializedLedgerCloseMeta)(nil)
-	_ encoding.BinaryUnmarshaler = (*SerializedLedgerCloseMeta)(nil)
-)
-
-// xdrType signals that this type represents XDR values defined by this package.
-func (s SerializedLedgerCloseMeta) xdrType() {}
-
-var _ xdrType = (*SerializedLedgerCloseMeta)(nil)
-
 // LedgerCloseMetaBatch is an XDR Struct defines as:
 //
 //	struct LedgerCloseMetaBatch