diff --git a/.github/workflows/beekeeper.yml b/.github/workflows/beekeeper.yml index 844d8b21ecc..6b56f48e515 100644 --- a/.github/workflows/beekeeper.yml +++ b/.github/workflows/beekeeper.yml @@ -12,7 +12,7 @@ env: REPLICA: 3 RUN_TYPE: "PR RUN" SETUP_CONTRACT_IMAGE: "ethersphere/bee-localchain" - SETUP_CONTRACT_IMAGE_TAG: "0.9.1-rc6" + SETUP_CONTRACT_IMAGE_TAG: "0.9.2-rc1" BEELOCAL_BRANCH: "main" BEEKEEPER_BRANCH: "master" BEEKEEPER_METRICS_ENABLED: false diff --git a/cmd/bee/cmd/cmd.go b/cmd/bee/cmd/cmd.go index c1152a4c37c..5729be19a11 100644 --- a/cmd/bee/cmd/cmd.go +++ b/cmd/bee/cmd/cmd.go @@ -49,9 +49,6 @@ const ( optionNamePaymentEarly = "payment-early-percent" optionNameResolverEndpoints = "resolver-options" optionNameBootnodeMode = "bootnode-mode" - optionNameClefSignerEnable = "clef-signer-enable" - optionNameClefSignerEndpoint = "clef-signer-endpoint" - optionNameClefSignerEthereumAddress = "clef-signer-ethereum-address" optionNameSwapEndpoint = "swap-endpoint" // deprecated: use rpc endpoint instead optionNameBlockchainRpcEndpoint = "blockchain-rpc-endpoint" optionNameSwapFactoryAddress = "swap-factory-address" @@ -84,6 +81,7 @@ const ( optionNameWhitelistedWithdrawalAddress = "withdrawal-addresses-whitelist" optionNameTransactionDebugMode = "transaction-debug-mode" optionMinimumStorageRadius = "minimum-storage-radius" + optionReserveCapacityDoubling = "reserve-capacity-doubling" ) // nolint:gochecknoinits @@ -263,9 +261,6 @@ func (c *command) setAllFlags(cmd *cobra.Command) { cmd.Flags().Int64(optionNamePaymentEarly, 50, "percentage below the peers payment threshold when we initiate settlement") cmd.Flags().StringSlice(optionNameResolverEndpoints, []string{}, "ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url") cmd.Flags().Bool(optionNameBootnodeMode, false, "cause the node to always accept incoming connections") - cmd.Flags().Bool(optionNameClefSignerEnable, false, "enable clef signer") 
- cmd.Flags().String(optionNameClefSignerEndpoint, "", "clef signer endpoint") - cmd.Flags().String(optionNameClefSignerEthereumAddress, "", "blockchain address to use from clef signer") cmd.Flags().String(optionNameSwapEndpoint, "", "swap blockchain endpoint") // deprecated: use rpc endpoint instead cmd.Flags().String(optionNameBlockchainRpcEndpoint, "", "rpc blockchain endpoint") cmd.Flags().String(optionNameSwapFactoryAddress, "", "swap factory addresses") @@ -296,6 +291,7 @@ func (c *command) setAllFlags(cmd *cobra.Command) { cmd.Flags().StringSlice(optionNameWhitelistedWithdrawalAddress, []string{}, "withdrawal target addresses") cmd.Flags().Bool(optionNameTransactionDebugMode, false, "skips the gas estimate step for contract transactions") cmd.Flags().Uint(optionMinimumStorageRadius, 0, "minimum radius storage threshold") + cmd.Flags().Int(optionReserveCapacityDoubling, 0, "reserve capacity doubling") } func newLogger(cmd *cobra.Command, verbosity string) (log.Logger, error) { diff --git a/cmd/bee/cmd/db.go b/cmd/bee/cmd/db.go index 2d8b456efc8..62eee2591c2 100644 --- a/cmd/bee/cmd/db.go +++ b/cmd/bee/cmd/db.go @@ -84,7 +84,7 @@ func dbInfoCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, CacheCapacity: 1_000_000, }) if err != nil { @@ -166,7 +166,7 @@ func dbCompactCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, validation) if err != nil { return fmt.Errorf("localstore: %w", err) @@ -221,7 +221,7 @@ func dbValidatePinsCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }) if err != nil { 
return fmt.Errorf("localstore: %w", err) @@ -283,7 +283,7 @@ func dbRepairReserve(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, CacheCapacity: 1_000_000, }) if err != nil { @@ -347,7 +347,7 @@ func dbValidateCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }) if err != nil { return fmt.Errorf("localstore: %w", err) @@ -410,7 +410,7 @@ func dbExportReserveCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, CacheCapacity: 1_000_000, }) if err != nil { @@ -493,7 +493,7 @@ func dbExportPinningCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, CacheCapacity: 1_000_000, }) if err != nil { @@ -603,7 +603,7 @@ func dbImportReserveCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, CacheCapacity: 1_000_000, }) if err != nil { @@ -687,7 +687,7 @@ func dbImportPinningCmd(cmd *cobra.Command) { Logger: logger, RadiusSetter: noopRadiusSetter{}, Batchstore: new(postage.NoOpBatchStore), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, CacheCapacity: 1_000_000, }) if err != nil { diff --git a/cmd/bee/cmd/db_test.go b/cmd/bee/cmd/db_test.go index 497006f31a8..f52da4bd0d2 100644 --- a/cmd/bee/cmd/db_test.go +++ b/cmd/bee/cmd/db_test.go @@ -14,7 +14,6 @@ import ( 
"github.com/ethersphere/bee/v2/cmd/bee/cmd" "github.com/ethersphere/bee/v2/pkg/log" - "github.com/ethersphere/bee/v2/pkg/node" "github.com/ethersphere/bee/v2/pkg/postage" storagetest "github.com/ethersphere/bee/v2/pkg/storage/testing" "github.com/ethersphere/bee/v2/pkg/storer" @@ -35,7 +34,7 @@ func TestDBExportImport(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: testutil.NewLogger(t), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, dir1) chunks := make(map[string]int) @@ -64,7 +63,7 @@ func TestDBExportImport(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: testutil.NewLogger(t), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, dir2) err = db2.ReserveIterateChunks(func(chunk swarm.Chunk) (bool, error) { @@ -95,7 +94,7 @@ func TestDBExportImportPinning(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: testutil.NewLogger(t), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, dir1) chunks := make(map[string]int) @@ -139,7 +138,7 @@ func TestDBExportImportPinning(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: testutil.NewLogger(t), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, dir2) addresses, err := db2.Pins() if err != nil { @@ -183,7 +182,7 @@ func TestDBNuke_FLAKY(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: log.Noop, - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, dataDir) nChunks := 10 @@ -213,7 +212,7 @@ func TestDBNuke_FLAKY(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: 
log.Noop, - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, path.Join(dataDir, "localstore")) if err != nil { t.Fatal(err) @@ -238,7 +237,7 @@ func TestDBInfo(t *testing.T) { Batchstore: new(postage.NoOpBatchStore), RadiusSetter: kademlia.NewTopologyDriver(), Logger: testutil.NewLogger(t), - ReserveCapacity: node.ReserveCapacity, + ReserveCapacity: storer.DefaultReserveCapacity, }, dir1) nChunks := 10 @@ -265,7 +264,7 @@ func TestDBInfo(t *testing.T) { t.Fatal(err) } - if !strings.Contains(buf.String(), fmt.Sprintf("\"msg\"=\"reserve\" \"size_within_radius\"=%d \"total_size\"=%d \"capacity\"=%d", nChunks, nChunks, node.ReserveCapacity)) { + if !strings.Contains(buf.String(), fmt.Sprintf("\"msg\"=\"reserve\" \"size_within_radius\"=%d \"total_size\"=%d \"capacity\"=%d", nChunks, nChunks, storer.DefaultReserveCapacity)) { t.Fatal("reserve info not correct") } } diff --git a/cmd/bee/cmd/start.go b/cmd/bee/cmd/start.go index 96748831b80..706908844e4 100644 --- a/cmd/bee/cmd/start.go +++ b/cmd/bee/cmd/start.go @@ -20,21 +20,16 @@ import ( "syscall" "time" - "github.com/ethereum/go-ethereum/accounts/external" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethersphere/bee/v2" "github.com/ethersphere/bee/v2/pkg/accesscontrol" chaincfg "github.com/ethersphere/bee/v2/pkg/config" "github.com/ethersphere/bee/v2/pkg/crypto" - "github.com/ethersphere/bee/v2/pkg/crypto/clef" "github.com/ethersphere/bee/v2/pkg/keystore" filekeystore "github.com/ethersphere/bee/v2/pkg/keystore/file" memkeystore "github.com/ethersphere/bee/v2/pkg/keystore/mem" "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/node" "github.com/ethersphere/bee/v2/pkg/resolver/multiresolver" - "github.com/ethersphere/bee/v2/pkg/spinlock" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/kardianos/service" "github.com/spf13/cobra" @@ -340,6 +335,7 @@ func buildBeeNode(ctx context.Context, c *command, 
cmd *cobra.Command, logger lo WhitelistedWithdrawalAddress: c.config.GetStringSlice(optionNameWhitelistedWithdrawalAddress), TrxDebugMode: c.config.GetBool(optionNameTransactionDebugMode), MinimumStorageRadius: c.config.GetUint(optionMinimumStorageRadius), + ReserveCapacityDoubling: c.config.GetInt(optionReserveCapacityDoubling), }) return b, err @@ -369,22 +365,6 @@ type signerConfig struct { session accesscontrol.Session } -func waitForClef(logger log.Logger, maxRetries uint64, endpoint string) (externalSigner *external.ExternalSigner, err error) { - var ( - interval = time.Second * 5 - timeout = interval * time.Duration(maxRetries) - ) - - spinErr := spinlock.WaitWithInterval(timeout, interval, func() bool { - externalSigner, err = external.NewExternalSigner(endpoint) - return err == nil - }) - if spinErr != nil { - logger.Warning("connect to clef signer failed", "error", err) - } - return -} - func (c *command) configureSigner(cmd *cobra.Command, logger log.Logger) (config *signerConfig, err error) { var keystore keystore.Service if c.config.GetString(optionNameDataDir) == "" { @@ -427,51 +407,13 @@ func (c *command) configureSigner(cmd *cobra.Command, logger log.Logger) (config } } - if c.config.GetBool(optionNameClefSignerEnable) { - endpoint := c.config.GetString(optionNameClefSignerEndpoint) - if endpoint == "" { - endpoint, err = clef.DefaultIpcPath() - if err != nil { - return nil, err - } - } - - externalSigner, err := waitForClef(logger, 5, endpoint) - if err != nil { - return nil, err - } - - clefRPC, err := rpc.Dial(endpoint) - if err != nil { - return nil, err - } - - wantedAddress := c.config.GetString(optionNameClefSignerEthereumAddress) - var overlayEthAddress *common.Address = nil - // if wantedAddress was specified use that, otherwise clef account 0 will be selected. 
- if wantedAddress != "" { - ethAddress := common.HexToAddress(wantedAddress) - overlayEthAddress = ðAddress - } - - signer, err = clef.NewSigner(externalSigner, clefRPC, crypto.Recover, overlayEthAddress) - if err != nil { - return nil, err - } - - publicKey, err = signer.PublicKey() - if err != nil { - return nil, err - } - } else { - swarmPrivateKey, _, err := keystore.Key("swarm", password, crypto.EDGSecp256_K1) - if err != nil { - return nil, fmt.Errorf("swarm key: %w", err) - } - signer = crypto.NewDefaultSigner(swarmPrivateKey) - publicKey = &swarmPrivateKey.PublicKey - session = accesscontrol.NewDefaultSession(swarmPrivateKey) + swarmPrivateKey, _, err := keystore.Key("swarm", password, crypto.EDGSecp256_K1) + if err != nil { + return nil, fmt.Errorf("swarm key: %w", err) } + signer = crypto.NewDefaultSigner(swarmPrivateKey) + publicKey = &swarmPrivateKey.PublicKey + session = accesscontrol.NewDefaultSession(swarmPrivateKey) logger.Info("swarm public key", "public_key", hex.EncodeToString(crypto.EncodeSecp256k1PublicKey(publicKey))) diff --git a/go.mod b/go.mod index 462904dba9a..f37fc36bebc 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/coreos/go-semver v0.3.0 github.com/ethereum/go-ethereum v1.14.3 github.com/ethersphere/go-price-oracle-abi v0.2.0 - github.com/ethersphere/go-storage-incentives-abi v0.9.1-rc6 + github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc1 github.com/ethersphere/go-sw3-abi v0.6.5 github.com/ethersphere/langos v1.0.0 github.com/go-playground/validator/v10 v10.11.1 diff --git a/go.sum b/go.sum index 3c53a47dc42..b837278926d 100644 --- a/go.sum +++ b/go.sum @@ -236,8 +236,8 @@ github.com/ethereum/go-ethereum v1.14.3 h1:5zvnAqLtnCZrU9uod1JCvHWJbPMURzYFHfc2e github.com/ethereum/go-ethereum v1.14.3/go.mod h1:1STrq471D0BQbCX9He0hUj4bHxX2k6mt5nOQJhDNOJ8= github.com/ethersphere/go-price-oracle-abi v0.2.0 h1:wtIcYLgNZHY4BjYwJCnu93SvJdVAZVvBaKinspyyHvQ= github.com/ethersphere/go-price-oracle-abi v0.2.0/go.mod 
h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk= -github.com/ethersphere/go-storage-incentives-abi v0.9.1-rc6 h1:wWSHAF1siVmiMj9koBTrJ8Vl9fK1I9w9DL3so+Ye5ss= -github.com/ethersphere/go-storage-incentives-abi v0.9.1-rc6/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= +github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc1 h1:Cf3LFlz87FqlTqcuN4q4Hry4iUaAbbroaFxpCgHVhtY= +github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc1/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= github.com/ethersphere/go-sw3-abi v0.6.5 h1:M5dcIe1zQYvGpY2K07UNkNU9Obc4U+A1fz68Ho/Q+XE= github.com/ethersphere/go-sw3-abi v0.6.5/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU= github.com/ethersphere/langos v1.0.0 h1:NBtNKzXTTRSue95uOlzPN4py7Aofs0xWPzyj4AI1Vcc= diff --git a/openapi/Swarm.yaml b/openapi/Swarm.yaml index 2542fe4abef..c2a08d6796d 100644 --- a/openapi/Swarm.yaml +++ b/openapi/Swarm.yaml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: - version: 7.1.0 + version: 7.2.0 title: Bee API description: "A list of the currently provided Interfaces to interact with the swarm, implementing file operations and sending messages" @@ -237,6 +237,7 @@ paths: - $ref: "SwarmCommon.yaml#/components/parameters/SwarmCache" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyFallbackModeParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyLevelParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmChunkRetrievalTimeoutParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" @@ -424,6 +425,7 @@ paths: - $ref: "SwarmCommon.yaml#/components/parameters/SwarmCache" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyFallbackModeParameter" + - $ref: 
"SwarmCommon.yaml#/components/parameters/SwarmRedundancyLevelParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmChunkRetrievalTimeoutParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" @@ -862,11 +864,12 @@ paths: $ref: "SwarmCommon.yaml#/components/schemas/HexString" required: true description: Signature - - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter" - in: header name: swarm-postage-batch-id schema: $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" + required: true + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter" required: false - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageStamp" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmAct" @@ -899,6 +902,47 @@ paths: $ref: "SwarmCommon.yaml#/components/responses/500" default: description: Default response + get: + summary: Resolve Single Owner Chunk data + tags: + - Single owner chunk + parameters: + - in: path + name: owner + schema: + $ref: "SwarmCommon.yaml#/components/schemas/EthereumAddress" + required: true + description: Ethereum address of the Owner of the SOC + - in: path + name: id + schema: + $ref: "SwarmCommon.yaml#/components/schemas/HexString" + required: true + description: Arbitrary identifier of the related data + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmOnlyRootChunkParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmCache" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyFallbackModeParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmChunkRetrievalTimeoutParameter" + responses: + "200": + description: Related Single Owner Chunk data + headers: + "swarm-soc-signature": + $ref: "SwarmCommon.yaml#/components/headers/SwarmSocSignature" + content: + application/octet-stream: + 
schema: + type: string + format: binary + "400": + $ref: "SwarmCommon.yaml#/components/responses/400" + "401": + $ref: "SwarmCommon.yaml#/components/responses/401" + "500": + $ref: "SwarmCommon.yaml#/components/responses/500" + default: + description: Default response "/feeds/{owner}/{topic}": post: @@ -983,18 +1027,26 @@ paths: $ref: "SwarmCommon.yaml#/components/schemas/FeedType" required: false description: "Feed indexing scheme (default: sequence)" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmOnlyRootChunkParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmCache" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyFallbackModeParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmChunkRetrievalTimeoutParameter" responses: "200": description: Latest feed update headers: + "swarm-soc-signature": + $ref: "SwarmCommon.yaml#/components/headers/SwarmSocSignature" "swarm-feed-index": $ref: "SwarmCommon.yaml#/components/headers/SwarmFeedIndex" "swarm-feed-index-next": $ref: "SwarmCommon.yaml#/components/headers/SwarmFeedIndexNext" content: - application/json: + application/octet-stream: schema: - $ref: "SwarmCommon.yaml#/components/schemas/ReferenceResponse" + type: string + format: binary "400": $ref: "SwarmCommon.yaml#/components/responses/400" "401": @@ -2418,6 +2470,23 @@ paths: default: description: Default response. + "/status/neighborhoods": + get: + summary: Get the current neighborhoods status of this node. + tags: + - Node Status + responses: + "200": + description: Returns the neighborhoods status of this node + content: + application/json: + schema: + $ref: "SwarmCommon.yaml#/components/schemas/StatusNeighborhoodsResponse" + "400": + $ref: "SwarmCommon.yaml#/components/responses/400" + default: + description: Default response. 
+ components: securitySchemes: basicAuth: diff --git a/openapi/SwarmCommon.yaml b/openapi/SwarmCommon.yaml index 92027be2a34..f871006c7e2 100644 --- a/openapi/SwarmCommon.yaml +++ b/openapi/SwarmCommon.yaml @@ -1,6 +1,6 @@ openapi: 3.0.3 info: - version: 4.1.0 + version: 4.2.0 title: Common Data Types description: | \*****bzzz***** @@ -242,6 +242,11 @@ components: type: string example: "5.0018ms" + Seconds: + description: Go time.Duration format in seconds + type: number + example: 30.5 + EthereumAddress: type: string pattern: "^[A-Fa-f0-9]{40}$" @@ -933,6 +938,30 @@ components: items: $ref: "#/components/schemas/StatusSnapshotResponse" + StatusNeighborhoodResponse: + type: object + properties: + neighborhood: + $ref: "#/components/schemas/Neighborhood" + reserveSizeWithinRadius: + type: integer + proximity: + type: integer + + Neighborhood: + type: string + description: Swarm address of a neighborhood in string binary format, usually limited to as many bits as the current storage radius. + example: "011010111" + + StatusNeighborhoodsResponse: + type: object + properties: + stamps: + type: array + nullable: false + items: + $ref: "#/components/schemas/StatusNeighborhoodResponse" + ApiChunkInclusionProof: type: object properties: @@ -991,8 +1020,8 @@ components: ApiRCHashResponse: type: object properties: - duration: - type: integer + durationSeconds: + $ref: "#/components/schemas/Seconds" hash: $ref: "#/components/schemas/SwarmAddress" proofs: @@ -1031,6 +1060,11 @@ components: schema: $ref: "#/components/schemas/HexString" + SwarmSocSignature: + description: "Attached digital signature of the Single Owner Chunk" + schema: + $ref: "#/components/schemas/HexString" + SwarmActHistoryAddress: description: "Swarm address reference to the new ACT history entry" schema: @@ -1136,6 +1170,14 @@ components: description: > Specify the timeout for chunk retrieval. The default is 30 seconds. 
+ SwarmOnlyRootChunkParameter: + in: header + name: swarm-only-root-chunk + schema: + type: boolean + required: false + description: "Returns only the root chunk of the content" + ContentTypePreserved: in: header name: Content-Type diff --git a/packaging/bee.yaml b/packaging/bee.yaml index 1db9dc1b4dc..24bf57d38ad 100644 --- a/packaging/bee.yaml +++ b/packaging/bee.yaml @@ -76,3 +76,5 @@ password-file: "/var/lib/bee/password" # mainnet: true ## minimum radius storage threshold # minimum-storage-radius: 0 +## reserve capacity doubling (default 0, maximum 1) +reserve-capacity-doubling: 0 diff --git a/packaging/homebrew-amd64/bee.yaml b/packaging/homebrew-amd64/bee.yaml index d563b968a8d..4632b0d324d 100644 --- a/packaging/homebrew-amd64/bee.yaml +++ b/packaging/homebrew-amd64/bee.yaml @@ -76,3 +76,5 @@ password-file: "/usr/local/var/lib/swarm-bee/password" # mainnet: true # ## minimum radius storage threshold # minimum-storage-radius: 0 +## reserve capacity doubling (default 0, maximum 1) +reserve-capacity-doubling: 0 diff --git a/packaging/homebrew-arm64/bee.yaml b/packaging/homebrew-arm64/bee.yaml index 5f7704cdc9d..0e3a2c1b750 100644 --- a/packaging/homebrew-arm64/bee.yaml +++ b/packaging/homebrew-arm64/bee.yaml @@ -76,3 +76,5 @@ password-file: "/opt/homebrew/var/lib/swarm-bee/password" # mainnet: true ## minimum radius storage threshold # minimum-storage-radius: 0 +## reserve capacity doubling (default 0, maximum 1) +reserve-capacity-doubling: 0 diff --git a/packaging/scoop/bee.yaml b/packaging/scoop/bee.yaml index 3cf5dd8b3b5..c877e447233 100644 --- a/packaging/scoop/bee.yaml +++ b/packaging/scoop/bee.yaml @@ -66,3 +66,5 @@ password-file: "./password" # mainnet: true ## minimum radius storage threshold # minimum-storage-radius: 0 +## reserve capacity doubling (default 0, maximum 1) +reserve-capacity-doubling: 0 diff --git a/pkg/api/api.go b/pkg/api/api.go index 807278b0287..bced503484c 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -75,8 +75,10 @@ const ( 
SwarmEncryptHeader = "Swarm-Encrypt" SwarmIndexDocumentHeader = "Swarm-Index-Document" SwarmErrorDocumentHeader = "Swarm-Error-Document" + SwarmSocSignatureHeader = "Swarm-Soc-Signature" SwarmFeedIndexHeader = "Swarm-Feed-Index" SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next" + SwarmOnlyRootChunk = "Swarm-Only-Root-Chunk" SwarmCollectionHeader = "Swarm-Collection" SwarmPostageBatchIdHeader = "Swarm-Postage-Batch-Id" SwarmPostageStampHeader = "Swarm-Postage-Stamp" @@ -139,6 +141,7 @@ type Storer interface { storer.LocalStore storer.RadiusChecker storer.Debugger + storer.NeighborhoodStats } type PinIntegrity interface { @@ -524,7 +527,7 @@ func (s *Service) corsHandler(h http.Handler) http.Handler { allowedHeaders := []string{ "User-Agent", "Accept", "X-Requested-With", "Access-Control-Request-Headers", "Access-Control-Request-Method", "Accept-Ranges", "Content-Encoding", AuthorizationHeader, AcceptEncodingHeader, ContentTypeHeader, ContentDispositionHeader, RangeHeader, OriginHeader, - SwarmTagHeader, SwarmPinHeader, SwarmEncryptHeader, SwarmIndexDocumentHeader, SwarmErrorDocumentHeader, SwarmCollectionHeader, SwarmPostageBatchIdHeader, SwarmPostageStampHeader, SwarmDeferredUploadHeader, SwarmRedundancyLevelHeader, SwarmRedundancyStrategyHeader, SwarmRedundancyFallbackModeHeader, SwarmChunkRetrievalTimeoutHeader, SwarmLookAheadBufferSizeHeader, SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, GasPriceHeader, GasLimitHeader, ImmutableHeader, + SwarmTagHeader, SwarmPinHeader, SwarmEncryptHeader, SwarmIndexDocumentHeader, SwarmErrorDocumentHeader, SwarmCollectionHeader, SwarmPostageBatchIdHeader, SwarmPostageStampHeader, SwarmDeferredUploadHeader, SwarmRedundancyLevelHeader, SwarmRedundancyStrategyHeader, SwarmRedundancyFallbackModeHeader, SwarmChunkRetrievalTimeoutHeader, SwarmLookAheadBufferSizeHeader, SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, SwarmSocSignatureHeader, SwarmOnlyRootChunk, GasPriceHeader, GasLimitHeader, ImmutableHeader, } allowedHeadersStr := 
strings.Join(allowedHeaders, ", ") diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index f19a3646e44..dc78dd64077 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -712,6 +712,7 @@ func createRedistributionAgentService( tranService, &mockHealth{}, log.Noop, + 0, ) } diff --git a/pkg/api/bytes.go b/pkg/api/bytes.go index 11a600f854b..5eabf41458b 100644 --- a/pkg/api/bytes.go +++ b/pkg/api/bytes.go @@ -182,7 +182,7 @@ func (s *Service) bytesGetHandler(w http.ResponseWriter, r *http.Request) { ContentTypeHeader: {"application/octet-stream"}, } - s.downloadHandler(logger, w, r, address, additionalHeaders, true, false) + s.downloadHandler(logger, w, r, address, additionalHeaders, true, false, nil) } func (s *Service) bytesHeadHandler(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/api/bzz.go b/pkg/api/bzz.go index 539779cd2d6..a07f3f0f54f 100644 --- a/pkg/api/bzz.go +++ b/pkg/api/bzz.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethersphere/bee/v2/pkg/accesscontrol" "github.com/ethersphere/bee/v2/pkg/feeds" + "github.com/ethersphere/bee/v2/pkg/file" "github.com/ethersphere/bee/v2/pkg/file/joiner" "github.com/ethersphere/bee/v2/pkg/file/loadsave" "github.com/ethersphere/bee/v2/pkg/file/redundancy" @@ -362,10 +363,11 @@ func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathV loggerV1 := logger.V(1).Build() headers := struct { - Cache *bool `map:"Swarm-Cache"` - Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"` - FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"` - ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"` + Cache *bool `map:"Swarm-Cache"` + Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"` + FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"` + RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"` + ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"` }{} if response := s.mapStructure(r.Header, &headers); 
response != nil { @@ -387,6 +389,9 @@ func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathV jsonhttp.BadRequest(w, "could not parse headers") return } + if headers.RLevel != nil { + ctx = redundancy.SetLevelInContext(ctx, *headers.RLevel) + } FETCH: // read manifest entry @@ -421,14 +426,17 @@ FETCH: jsonhttp.NotFound(w, "no update found") return } - ref, _, err := parseFeedUpdate(ch) + wc, err := feeds.GetWrappedChunk(ctx, s.storer.ChunkStore(), ch) if err != nil { logger.Debug("bzz download: mapStructure feed update failed", "error", err) logger.Error(nil, "bzz download: mapStructure feed update failed") jsonhttp.InternalServerError(w, "mapStructure feed update") return } - address = ref + address = wc.Address() + // modify ls and init with non-existing wrapped chunk + ls = loadsave.NewReadonlyWithRootCh(s.storer.Download(cache), wc) + feedDereferenced = true curBytes, err := cur.MarshalBinary() if err != nil { @@ -550,17 +558,18 @@ func (s *Service) serveManifestEntry( additionalHeaders[ContentTypeHeader] = []string{mimeType} } - s.downloadHandler(logger, w, r, manifestEntry.Reference(), additionalHeaders, etag, headersOnly) + s.downloadHandler(logger, w, r, manifestEntry.Reference(), additionalHeaders, etag, headersOnly, nil) } // downloadHandler contains common logic for downloading Swarm file from API -func (s *Service) downloadHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag, headersOnly bool) { +func (s *Service) downloadHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag, headersOnly bool, rootCh swarm.Chunk) { headers := struct { - Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"` - FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"` - ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"` - LookaheadBufferSize *int 
`map:"Swarm-Lookahead-Buffer-Size"` - Cache *bool `map:"Swarm-Cache"` + Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"` + RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"` + FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"` + ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"` + LookaheadBufferSize *int `map:"Swarm-Lookahead-Buffer-Size"` + Cache *bool `map:"Swarm-Cache"` }{} if response := s.mapStructure(r.Header, &headers); response != nil { @@ -579,8 +588,19 @@ func (s *Service) downloadHandler(logger log.Logger, w http.ResponseWriter, r *h jsonhttp.BadRequest(w, "could not parse headers") return } + if headers.RLevel != nil { + ctx = redundancy.SetLevelInContext(ctx, *headers.RLevel) + } - reader, l, err := joiner.New(ctx, s.storer.Download(cache), s.storer.Cache(), reference) + var ( + reader file.Joiner + l int64 + ) + if rootCh != nil { + reader, l, err = joiner.NewJoiner(ctx, s.storer.Download(cache), s.storer.Cache(), reference, rootCh) + } else { + reader, l, err = joiner.New(ctx, s.storer.Download(cache), s.storer.Cache(), reference) + } if err != nil { if errors.Is(err, storage.ErrNotFound) || errors.Is(err, topology.ErrNotFound) { logger.Debug("api download: not found ", "address", reference, "error", err) diff --git a/pkg/api/feed.go b/pkg/api/feed.go index 7750fd7605c..7c92f3fd855 100644 --- a/pkg/api/feed.go +++ b/pkg/api/feed.go @@ -5,11 +5,13 @@ package api import ( - "encoding/binary" + "bytes" "encoding/hex" "errors" - "fmt" + "io" "net/http" + "strconv" + "strings" "time" "github.com/ethereum/go-ethereum/common" @@ -34,8 +36,6 @@ const ( feedMetadataEntryType = "swarm-feed-type" ) -var errInvalidFeedUpdate = errors.New("invalid feed update") - type feedReferenceResponse struct { Reference swarm.Address `json:"reference"` } @@ -64,6 +64,14 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) { queries.At = time.Now().Unix() } + headers := struct { + OnlyRootChunk bool 
`map:"Swarm-Only-Root-Chunk"` + }{} + if response := s.mapStructure(r.Header, &headers); response != nil { + response("invalid header params", logger, w) + return + } + f := feeds.New(paths.Topic, paths.Owner) lookup, err := s.feedFactory.NewLookup(feeds.Sequence, f) if err != nil { @@ -94,11 +102,10 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) { return } - ref, _, err := parseFeedUpdate(ch) + wc, err := feeds.GetWrappedChunk(r.Context(), s.storer.ChunkStore(), ch) if err != nil { - logger.Debug("mapStructure feed update failed", "error", err) - logger.Error(nil, "mapStructure feed update failed") - jsonhttp.InternalServerError(w, "mapStructure feed update failed") + logger.Error(nil, "wrapped chunk cannot be retrieved") + jsonhttp.NotFound(w, "wrapped chunk cannot be retrieved") return } @@ -118,11 +125,33 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set(SwarmFeedIndexHeader, hex.EncodeToString(curBytes)) - w.Header().Set(SwarmFeedIndexNextHeader, hex.EncodeToString(nextBytes)) - w.Header().Set("Access-Control-Expose-Headers", fmt.Sprintf("%s, %s", SwarmFeedIndexHeader, SwarmFeedIndexNextHeader)) + socCh, err := soc.FromChunk(ch) + if err != nil { + logger.Error(nil, "wrapped chunk cannot be retrieved") + jsonhttp.NotFound(w, "wrapped chunk cannot be retrieved") + return + } + sig := socCh.Signature() + + additionalHeaders := http.Header{ + ContentTypeHeader: {"application/octet-stream"}, + SwarmFeedIndexHeader: {hex.EncodeToString(curBytes)}, + SwarmFeedIndexNextHeader: {hex.EncodeToString(nextBytes)}, + SwarmSocSignatureHeader: {hex.EncodeToString(sig)}, + "Access-Control-Expose-Headers": {SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, SwarmSocSignatureHeader}, + } + + if headers.OnlyRootChunk { + w.Header().Set(ContentLengthHeader, strconv.Itoa(len(wc.Data()))) + // include additional headers + for name, values := range additionalHeaders { + w.Header().Set(name, 
strings.Join(values, ", ")) + } + _, _ = io.Copy(w, bytes.NewReader(wc.Data())) + return + } - jsonhttp.OK(w, feedReferenceResponse{Reference: ref}) + s.downloadHandler(logger, w, r, wc.Address(), additionalHeaders, true, false, wc) } func (s *Service) feedPostHandler(w http.ResponseWriter, r *http.Request) { @@ -278,22 +307,3 @@ func (s *Service) feedPostHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.Created(w, feedReferenceResponse{Reference: encryptedReference}) } - -func parseFeedUpdate(ch swarm.Chunk) (swarm.Address, int64, error) { - s, err := soc.FromChunk(ch) - if err != nil { - return swarm.ZeroAddress, 0, fmt.Errorf("soc unmarshal: %w", err) - } - - update := s.WrappedChunk().Data() - // split the timestamp and reference - // possible values right now: - // unencrypted ref: span+timestamp+ref => 8+8+32=48 - // encrypted ref: span+timestamp+ref+decryptKey => 8+8+64=80 - if len(update) != 48 && len(update) != 80 { - return swarm.ZeroAddress, 0, errInvalidFeedUpdate - } - ts := binary.BigEndian.Uint64(update[8:16]) - ref := swarm.NewAddress(update[16:]) - return ref, int64(ts), nil -} diff --git a/pkg/api/feed_test.go b/pkg/api/feed_test.go index a35b9ce3423..843756d7237 100644 --- a/pkg/api/feed_test.go +++ b/pkg/api/feed_test.go @@ -5,11 +5,13 @@ package api_test import ( + "bytes" "context" "encoding/binary" "encoding/hex" "errors" "fmt" + "io" "math/big" "net/http" "testing" @@ -17,6 +19,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/api" "github.com/ethersphere/bee/v2/pkg/feeds" "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/splitter" "github.com/ethersphere/bee/v2/pkg/jsonhttp" "github.com/ethersphere/bee/v2/pkg/jsonhttp/jsonhttptest" "github.com/ethersphere/bee/v2/pkg/log" @@ -24,8 +27,10 @@ import ( "github.com/ethersphere/bee/v2/pkg/postage" mockpost "github.com/ethersphere/bee/v2/pkg/postage/mock" testingsoc "github.com/ethersphere/bee/v2/pkg/soc/testing" + testingc 
"github.com/ethersphere/bee/v2/pkg/storage/testing" mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/ethersphere/bee/v2/pkg/util/testutil" ) const ownerString = "8d3766440f0d7b949a5e32995d09619a7f86e632" @@ -44,13 +49,22 @@ func TestFeed_Get(t *testing.T) { } mockStorer = mockstorer.New() ) + putter, err := mockStorer.Upload(context.Background(), false, 0) + if err != nil { + t.Fatal(err) + } + mockWrappedCh := testingc.FixtureChunk("0033") + err = putter.Put(context.Background(), mockWrappedCh) + if err != nil { + t.Fatal(err) + } t.Run("with at", func(t *testing.T) { t.Parallel() var ( timestamp = int64(12121212) - ch = toChunk(t, uint64(timestamp), expReference.Bytes()) + ch = toChunk(t, uint64(timestamp), mockWrappedCh.Address().Bytes()) look = newMockLookup(12, 0, ch, nil, &id{}, &id{}) factory = newMockFactory(look) idBytes, _ = (&id{}).MarshalBinary() @@ -61,7 +75,7 @@ func TestFeed_Get(t *testing.T) { ) jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", "12"), http.StatusOK, - jsonhttptest.WithExpectedJSONResponse(api.FeedReferenceResponse{Reference: expReference}), + jsonhttptest.WithExpectedResponse(mockWrappedCh.Data()[swarm.SpanSize:]), jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedIndexHeader, hex.EncodeToString(idBytes)), ) }) @@ -71,7 +85,7 @@ func TestFeed_Get(t *testing.T) { var ( timestamp = int64(12121212) - ch = toChunk(t, uint64(timestamp), expReference.Bytes()) + ch = toChunk(t, uint64(timestamp), mockWrappedCh.Address().Bytes()) look = newMockLookup(-1, 2, ch, nil, &id{}, &id{}) factory = newMockFactory(look) idBytes, _ = (&id{}).MarshalBinary() @@ -83,10 +97,102 @@ func TestFeed_Get(t *testing.T) { ) jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusOK, - jsonhttptest.WithExpectedJSONResponse(api.FeedReferenceResponse{Reference: expReference}), + 
jsonhttptest.WithExpectedResponse(mockWrappedCh.Data()[swarm.SpanSize:]), + jsonhttptest.WithExpectedContentLength(len(mockWrappedCh.Data()[swarm.SpanSize:])), jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedIndexHeader, hex.EncodeToString(idBytes)), ) }) + + t.Run("chunk wrapping", func(t *testing.T) { + t.Parallel() + + testData := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8} + + var ( + ch = testingsoc.GenerateMockSOC(t, testData).Chunk() + look = newMockLookup(-1, 2, ch, nil, &id{}, &id{}) + factory = newMockFactory(look) + idBytes, _ = (&id{}).MarshalBinary() + + client, _, _, _ = newTestServer(t, testServerOptions{ + Storer: mockStorer, + Feeds: factory, + }) + ) + + jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusOK, + jsonhttptest.WithExpectedResponse(testData), + jsonhttptest.WithExpectedContentLength(len(testData)), + jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedIndexHeader, hex.EncodeToString(idBytes)), + ) + }) + + t.Run("legacy payload with non existing wrapped chunk", func(t *testing.T) { + t.Parallel() + + wrappedRef := make([]byte, swarm.HashSize) + _ = copy(wrappedRef, mockWrappedCh.Address().Bytes()) + wrappedRef[0]++ + + var ( + ch = toChunk(t, uint64(12121212), wrappedRef) + look = newMockLookup(-1, 2, ch, nil, &id{}, &id{}) + factory = newMockFactory(look) + + client, _, _, _ = newTestServer(t, testServerOptions{ + Storer: mockStorer, + Feeds: factory, + }) + ) + + jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusNotFound) + }) + + t.Run("bigger payload than one chunk", func(t *testing.T) { + t.Parallel() + + testDataLen := 5000 + testData := testutil.RandBytesWithSeed(t, testDataLen, 1) + s := splitter.NewSimpleSplitter(putter) + addr, err := s.Split(context.Background(), io.NopCloser(bytes.NewReader(testData)), int64(testDataLen), false) + if err != nil { + t.Fatal(err) + } + + // get root ch addr then add wrap it with soc + testRootCh, 
err := mockStorer.ChunkStore().Get(context.Background(), addr) + if err != nil { + t.Fatal(err) + } + var ( + ch = testingsoc.GenerateMockSOCWithSpan(t, testRootCh.Data()).Chunk() + look = newMockLookup(-1, 2, ch, nil, &id{}, &id{}) + factory = newMockFactory(look) + idBytes, _ = (&id{}).MarshalBinary() + + client, _, _, _ = newTestServer(t, testServerOptions{ + Storer: mockStorer, + Feeds: factory, + }) + ) + + t.Run("retrieve chunk tree", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusOK, + jsonhttptest.WithExpectedResponse(testData), + jsonhttptest.WithExpectedContentLength(testDataLen), + jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedIndexHeader, hex.EncodeToString(idBytes)), + ) + }) + + t.Run("retrieve only wrapped chunk", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmOnlyRootChunk, "true"), + jsonhttptest.WithExpectedResponse(testRootCh.Data()), + jsonhttptest.WithExpectedContentLength(len(testRootCh.Data())), + jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedIndexHeader, hex.EncodeToString(idBytes)), + ) + }) + }) } // nolint:paralleltest diff --git a/pkg/api/rchash.go b/pkg/api/rchash.go index 3aec3e3836a..d0870fde9b8 100644 --- a/pkg/api/rchash.go +++ b/pkg/api/rchash.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "net/http" "strconv" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethersphere/bee/v2/pkg/jsonhttp" @@ -17,9 +16,9 @@ import ( ) type RCHashResponse struct { - Hash swarm.Address `json:"hash"` - Proofs ChunkInclusionProofs `json:"proofs"` - Duration time.Duration `json:"duration"` + Hash swarm.Address `json:"hash"` + Proofs ChunkInclusionProofs `json:"proofs"` + DurationSeconds float64 `json:"durationSeconds"` } type ChunkInclusionProofs struct { @@ -131,9 +130,9 @@ func (s *Service) rchash(w http.ResponseWriter, r 
*http.Request) { } resp := RCHashResponse{ - Hash: swp.Hash, - Duration: swp.Duration, - Proofs: renderChunkInclusionProofs(swp.Proofs), + Hash: swp.Hash, + DurationSeconds: swp.Duration.Seconds(), + Proofs: renderChunkInclusionProofs(swp.Proofs), } jsonhttp.OK(w, resp) diff --git a/pkg/api/readiness.go b/pkg/api/readiness.go index d7f0d8dd27a..35685483892 100644 --- a/pkg/api/readiness.go +++ b/pkg/api/readiness.go @@ -4,12 +4,27 @@ package api -import "net/http" +import ( + "net/http" + + "github.com/ethersphere/bee/v2" + "github.com/ethersphere/bee/v2/pkg/jsonhttp" +) + +type ReadyStatusResponse healthStatusResponse func (s *Service) readinessHandler(w http.ResponseWriter, _ *http.Request) { if s.probe.Ready() == ProbeStatusOK { - w.WriteHeader(http.StatusOK) + jsonhttp.OK(w, ReadyStatusResponse{ + Status: "ready", + Version: bee.Version, + APIVersion: Version, + }) } else { - w.WriteHeader(http.StatusBadRequest) + jsonhttp.BadRequest(w, ReadyStatusResponse{ + Status: "notReady", + Version: bee.Version, + APIVersion: Version, + }) } } diff --git a/pkg/api/readiness_test.go b/pkg/api/readiness_test.go index 52677214919..0454f541c2f 100644 --- a/pkg/api/readiness_test.go +++ b/pkg/api/readiness_test.go @@ -37,10 +37,20 @@ func TestReadiness(t *testing.T) { // When we set readiness probe to OK it should indicate that API is ready probe.SetReady(api.ProbeStatusOK) - jsonhttptest.Request(t, testServer, http.MethodGet, "/readiness", http.StatusOK) + jsonhttptest.Request(t, testServer, http.MethodGet, "/readiness", http.StatusOK, + jsonhttptest.WithExpectedJSONResponse(api.ReadyStatusResponse{ + Status: "ready", + Version: "-dev", + APIVersion: "0.0.0", + })) // When we set readiness probe to NOK it should indicate that API is not ready probe.SetReady(api.ProbeStatusNOK) - jsonhttptest.Request(t, testServer, http.MethodGet, "/readiness", http.StatusBadRequest) + jsonhttptest.Request(t, testServer, http.MethodGet, "/readiness", http.StatusBadRequest, + 
jsonhttptest.WithExpectedJSONResponse(api.ReadyStatusResponse{ + Status: "notReady", + Version: "-dev", + APIVersion: "0.0.0", + })) }) } diff --git a/pkg/api/router.go b/pkg/api/router.go index 2326eb910df..aa1f1514622 100644 --- a/pkg/api/router.go +++ b/pkg/api/router.go @@ -66,9 +66,13 @@ func (s *Service) MountAPI() { "/bzz", "/bytes", "/chunks", + "/feeds", + "/soc", rootPath + "/bzz", rootPath + "/bytes", rootPath + "/chunks", + rootPath + "/feeds", + rootPath + "/soc", } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -247,6 +251,7 @@ func (s *Service) mountAPI() { }) handle("/soc/{owner}/{id}", jsonhttp.MethodHandler{ + "GET": http.HandlerFunc(s.socGetHandler), "POST": web.ChainHandlers( jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize), web.FinalHandlerFunc(s.socUploadHandler), @@ -507,10 +512,10 @@ func (s *Service) mountBusinessDebug() { ), }) + handle("/wallet", jsonhttp.MethodHandler{ + "GET": http.HandlerFunc(s.walletHandler), + }) if s.swapEnabled { - handle("/wallet", jsonhttp.MethodHandler{ - "GET": http.HandlerFunc(s.walletHandler), - }) handle("/wallet/withdraw/{coin}", jsonhttp.MethodHandler{ "POST": web.ChainHandlers( s.gasConfigMiddleware("wallet withdraw"), @@ -633,6 +638,14 @@ func (s *Service) mountBusinessDebug() { ), }) + handle("/status/neighborhoods", jsonhttp.MethodHandler{ + "GET": web.ChainHandlers( + httpaccess.NewHTTPAccessSuppressLogHandler(), + s.statusAccessHandler, + web.FinalHandlerFunc(s.statusGetNeighborhoods), + ), + }) + handle("/rchash/{depth}/{anchor1}/{anchor2}", web.ChainHandlers( web.FinalHandler(jsonhttp.MethodHandler{ "GET": http.HandlerFunc(s.rchash), diff --git a/pkg/api/soc.go b/pkg/api/soc.go index 08843e6013d..a6e0c37e187 100644 --- a/pkg/api/soc.go +++ b/pkg/api/soc.go @@ -5,9 +5,13 @@ package api import ( + "bytes" + "encoding/hex" "errors" "io" "net/http" + "strconv" + "strings" "github.com/ethersphere/bee/v2/pkg/accesscontrol" "github.com/ethersphere/bee/v2/pkg/cac" @@ 
-228,3 +232,66 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.Created(w, socPostResponse{Reference: reference}) } + +func (s *Service) socGetHandler(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("get_soc").Build() + + paths := struct { + Owner []byte `map:"owner" validate:"required"` + ID []byte `map:"id" validate:"required"` + }{} + if response := s.mapStructure(mux.Vars(r), &paths); response != nil { + response("invalid path params", logger, w) + return + } + + headers := struct { + OnlyRootChunk bool `map:"Swarm-Only-Root-Chunk"` + }{} + if response := s.mapStructure(r.Header, &headers); response != nil { + response("invalid header params", logger, w) + return + } + + address, err := soc.CreateAddress(paths.ID, paths.Owner) + if err != nil { + logger.Error(err, "soc address cannot be created") + jsonhttp.BadRequest(w, "soc address cannot be created") + return + } + + getter := s.storer.Download(true) + sch, err := getter.Get(r.Context(), address) + if err != nil { + logger.Error(err, "soc retrieval failed") + jsonhttp.NotFound(w, "requested chunk cannot be retrieved") + return + } + socCh, err := soc.FromChunk(sch) + if err != nil { + logger.Error(err, "chunk is not a single owner chunk") + jsonhttp.InternalServerError(w, "chunk is not a single owner chunk") + return + } + + sig := socCh.Signature() + wc := socCh.WrappedChunk() + + additionalHeaders := http.Header{ + ContentTypeHeader: {"application/octet-stream"}, + SwarmSocSignatureHeader: {hex.EncodeToString(sig)}, + "Access-Control-Expose-Headers": {SwarmSocSignatureHeader}, + } + + if headers.OnlyRootChunk { + w.Header().Set(ContentLengthHeader, strconv.Itoa(len(wc.Data()))) + // include additional headers + for name, values := range additionalHeaders { + w.Header().Set(name, strings.Join(values, ", ")) + } + _, _ = io.Copy(w, bytes.NewReader(wc.Data())) + return + } + + s.downloadHandler(logger, w, r, wc.Address(), 
additionalHeaders, true, false, wc) +} diff --git a/pkg/api/soc_test.go b/pkg/api/soc_test.go index 94eeccf60dd..2407689ece1 100644 --- a/pkg/api/soc_test.go +++ b/pkg/api/soc_test.go @@ -89,16 +89,31 @@ func TestSOC(t *testing.T) { ) // try to fetch the same chunk - rsrc := fmt.Sprintf("/chunks/" + s.Address().String()) - resp := request(t, client, http.MethodGet, rsrc, nil, http.StatusOK) - data, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(s.Chunk().Data(), data) { - t.Fatal("data retrieved doesn't match uploaded content") - } + t.Run("chunks fetch", func(t *testing.T) { + rsrc := fmt.Sprintf("/chunks/" + s.Address().String()) + resp := request(t, client, http.MethodGet, rsrc, nil, http.StatusOK) + data, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(s.Chunk().Data(), data) { + t.Fatal("data retrieved doesn't match uploaded content") + } + }) + + t.Run("soc fetch", func(t *testing.T) { + rsrc := fmt.Sprintf("/soc/%s/%s", hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID)) + resp := request(t, client, http.MethodGet, rsrc, nil, http.StatusOK) + data, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(s.WrappedChunk.Data()[swarm.SpanSize:], data) { + t.Fatal("data retrieved doesn't match uploaded content") + } + }) }) t.Run("postage", func(t *testing.T) { diff --git a/pkg/api/status.go b/pkg/api/status.go index 867c7d410e9..30e09e1f166 100644 --- a/pkg/api/status.go +++ b/pkg/api/status.go @@ -36,6 +36,16 @@ type statusResponse struct { Snapshots []statusSnapshotResponse `json:"snapshots"` } +type statusNeighborhoodResponse struct { + Neighborhood string `json:"neighborhood"` + ReserveSizeWithinRadius int `json:"reserveSizeWithinRadius"` + Proximity uint8 `json:"proximity"` +} + +type neighborhoodsResponse struct { + Neighborhoods []statusNeighborhoodResponse `json:"neighborhoods"` +} + // statusAccessHandler is a middleware that limits the number 
of simultaneous // status requests. func (s *Service) statusAccessHandler(h http.Handler) http.Handler { @@ -159,3 +169,34 @@ func (s *Service) statusGetPeersHandler(w http.ResponseWriter, r *http.Request) }) jsonhttp.OK(w, statusResponse{Snapshots: snapshots}) } + +// statusGetNeighborhoods returns the status of the node's neighborhoods. +func (s *Service) statusGetNeighborhoods(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("get_status_neighborhoods").Build() + + if s.beeMode == DevMode { + logger.Warning("status neighborhoods endpoint is disabled in dev mode") + jsonhttp.BadRequest(w, errUnsupportedDevNodeOperation) + return + } + + neighborhoods := make([]statusNeighborhoodResponse, 0) + + nhoods, err := s.storer.NeighborhoodsStat(r.Context()) + if err != nil { + logger.Debug("unable to get neighborhoods status", "error", err) + logger.Error(nil, "unable to get neighborhoods status") + jsonhttp.InternalServerError(w, "unable to get neighborhoods status") + return + } + + for _, n := range nhoods { + neighborhoods = append(neighborhoods, statusNeighborhoodResponse{ + Neighborhood: n.Neighborhood.String(), + ReserveSizeWithinRadius: n.ReserveSizeWithinRadius, + Proximity: swarm.Proximity(s.overlay.Bytes(), n.Neighborhood.Bytes()), + }) + } + + jsonhttp.OK(w, neighborhoodsResponse{Neighborhoods: neighborhoods}) +} diff --git a/pkg/api/status_test.go b/pkg/api/status_test.go index 31822ce789d..654e94708a8 100644 --- a/pkg/api/status_test.go +++ b/pkg/api/status_test.go @@ -5,6 +5,7 @@ package api_test import ( + "context" "net/http" "testing" @@ -14,6 +15,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/postage" "github.com/ethersphere/bee/v2/pkg/status" + "github.com/ethersphere/bee/v2/pkg/storer" "github.com/ethersphere/bee/v2/pkg/topology" ) @@ -119,6 +121,7 @@ type statusSnapshotMock struct { storageRadius uint8 commitment uint64 chainState *postage.ChainState + neighborhoods []*storer.NeighborhoodStat } func (m
*statusSnapshotMock) SyncRate() float64 { return m.syncRate } @@ -129,3 +132,6 @@ func (m *statusSnapshotMock) GetChainState() *postage.ChainState { return m.chai func (m *statusSnapshotMock) ReserveSizeWithinRadius() uint64 { return m.reserveSizeWithinRadius } +func (m *statusSnapshotMock) NeighborhoodsStat(ctx context.Context) ([]*storer.NeighborhoodStat, error) { + return m.neighborhoods, nil +} diff --git a/pkg/api/transaction.go b/pkg/api/transaction.go index 6f93be7ca57..5be66c7fb42 100644 --- a/pkg/api/transaction.go +++ b/pkg/api/transaction.go @@ -144,8 +144,7 @@ func (s *Service) transactionResendHandler(w http.ResponseWriter, r *http.Reques err := s.transaction.ResendTransaction(r.Context(), paths.Hash) if err != nil { - logger.Debug("resend transaction failed", "tx_hash", paths.Hash, "error", err) - logger.Error(nil, "resend transaction failed", "tx_hash", paths.Hash) + logger.Error(nil, "resend transaction failed", "tx_hash", paths.Hash, "error", err) if errors.Is(err, transaction.ErrUnknownTransaction) { jsonhttp.NotFound(w, errUnknownTransaction) } else if errors.Is(err, transaction.ErrAlreadyImported) { diff --git a/pkg/crypto/clef/clef.go b/pkg/crypto/clef/clef.go deleted file mode 100644 index 7d30ab6335a..00000000000 --- a/pkg/crypto/clef/clef.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2020 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package clef - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "math/big" - "os" - "path/filepath" - "runtime" - - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethersphere/bee/v2/pkg/crypto" - "github.com/ethersphere/bee/v2/pkg/crypto/eip712" -) - -var ( - ErrNoAccounts = errors.New("no accounts found in clef") - ErrAccountNotAvailable = errors.New("account not available in clef") - clefRecoveryMessage = []byte("public key recovery message") -) - -// ExternalSignerInterface is the interface for the clef client from go-ethereum. -type ExternalSignerInterface interface { - SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) - SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) - Accounts() []accounts.Account -} - -// Client is the interface for rpc.RpcClient. -type Client interface { - Call(result interface{}, method string, args ...interface{}) error -} - -type clefSigner struct { - client Client // low-level rpc client to clef as ExternalSigner does not implement account_signTypedData - clef ExternalSignerInterface - account accounts.Account // the account this signer will use - pubKey *ecdsa.PublicKey // the public key for the account -} - -// DefaultIpcPath returns the os-dependent default ipc path for clef. 
-func DefaultIpcPath() (string, error) { - socket := "clef.ipc" - // on windows clef uses top level pipes - if runtime.GOOS == "windows" { - return `\\.\pipe\` + socket, nil - } - - home, err := os.UserHomeDir() - if err != nil { - return "", err - } - - // on mac os clef defaults to ~/Library/Signer/clef.ipc - if runtime.GOOS == "darwin" { - return filepath.Join(home, "Library", "Signer", socket), nil - } - - // on unix clef defaults to ~/.clef/clef.ipc - return filepath.Join(home, ".clef", socket), nil -} - -func selectAccount(clef ExternalSignerInterface, ethAddress *common.Address) (accounts.Account, error) { - // get the list of available ethereum accounts - clefAccounts := clef.Accounts() - if len(clefAccounts) == 0 { - return accounts.Account{}, ErrNoAccounts - } - - if ethAddress == nil { - // pick the first account as the one we use - return clefAccounts[0], nil - } - - for _, availableAccount := range clefAccounts { - if availableAccount.Address == *ethAddress { - return availableAccount, nil - } - } - return accounts.Account{}, ErrAccountNotAvailable -} - -// NewSigner creates a new connection to the signer at endpoint. -// If ethAddress is nil the account with index 0 will be selected. Otherwise it will verify the requested account actually exists. -// As clef does not expose public keys it signs a test message to recover the public key. 
-func NewSigner(clef ExternalSignerInterface, client Client, recoverFunc crypto.RecoverFunc, ethAddress *common.Address) (signer crypto.Signer, err error) { - account, err := selectAccount(clef, ethAddress) - if err != nil { - return nil, err - } - - // clef currently does not expose the public key - // sign some data so we can recover it - sig, err := clef.SignData(account, accounts.MimetypeTextPlain, clefRecoveryMessage) - if err != nil { - return nil, err - } - - pubKey, err := recoverFunc(sig, clefRecoveryMessage) - if err != nil { - return nil, err - } - - return &clefSigner{ - client: client, - clef: clef, - account: account, - pubKey: pubKey, - }, nil -} - -// PublicKey returns the public key recovered during creation. -func (c *clefSigner) PublicKey() (*ecdsa.PublicKey, error) { - return c.pubKey, nil -} - -// SignData signs with the text/plain type which is the standard Ethereum prefix method. -func (c *clefSigner) Sign(data []byte) ([]byte, error) { - return c.clef.SignData(c.account, accounts.MimetypeTextPlain, data) -} - -// SignTx signs an ethereum transaction. -func (c *clefSigner) SignTx(transaction *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - // chainId is nil here because it is set on the clef side - tx, err := c.clef.SignTx(c.account, transaction, nil) - if err != nil { - return nil, err - } - - if chainID.Cmp(tx.ChainId()) != 0 { - return nil, fmt.Errorf("misconfigured signer: wrong chain id %d; wanted %d", tx.ChainId(), chainID) - } - - return tx, nil -} - -// EthereumAddress returns the ethereum address this signer uses. -func (c *clefSigner) EthereumAddress() (common.Address, error) { - return c.account.Address, nil -} - -// SignTypedData signs data according to eip712. 
-func (c *clefSigner) SignTypedData(typedData *eip712.TypedData) ([]byte, error) { - var sig hexutil.Bytes - err := c.client.Call(&sig, "account_signTypedData", c.account.Address, typedData) - if err != nil { - return nil, err - } - - return sig, nil -} diff --git a/pkg/crypto/clef/clef_test.go b/pkg/crypto/clef/clef_test.go deleted file mode 100644 index 34573de4d1c..00000000000 --- a/pkg/crypto/clef/clef_test.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2020 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package clef_test - -import ( - "bytes" - "crypto/ecdsa" - "errors" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethersphere/bee/v2/pkg/crypto" - "github.com/ethersphere/bee/v2/pkg/crypto/clef" - "github.com/ethersphere/bee/v2/pkg/crypto/eip712" -) - -type mockClef struct { - accounts []accounts.Account - signature []byte - - signedMimeType string - signedData []byte - signedAccount accounts.Account -} - -func (m *mockClef) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { - m.signedAccount = account - m.signedMimeType = mimeType - m.signedData = data - return m.signature, nil -} - -func (m *mockClef) Accounts() []accounts.Account { - return m.accounts -} - -func (m *mockClef) SignTx(account accounts.Account, transaction *types.Transaction, chainId *big.Int) (*types.Transaction, error) { - return nil, nil -} - -func TestNewClefSigner(t *testing.T) { - t.Parallel() - - ethAddress := common.HexToAddress("0x31415b599f636129AD03c196cef9f8f8b184D5C7") - testSignature := make([]byte, 65) - - key, err := crypto.GenerateSecp256k1Key() - if err != nil { - t.Fatal(err) - } - publicKey := &key.PublicKey - - mock := &mockClef{ - accounts: []accounts.Account{ - 
{ - Address: ethAddress, - }, - { - Address: common.Address{}, - }, - }, - signature: testSignature, - } - - signer, err := clef.NewSigner(mock, nil, func(signature, data []byte) (*ecdsa.PublicKey, error) { - if !bytes.Equal(testSignature, signature) { - t.Fatalf("wrong data used for recover. expected %v got %v", testSignature, signature) - } - - if !bytes.Equal(clef.ClefRecoveryMessage, data) { - t.Fatalf("wrong data used for recover. expected %v got %v", clef.ClefRecoveryMessage, data) - } - return publicKey, nil - }, nil) - if err != nil { - t.Fatal(err) - } - - if mock.signedAccount.Address != ethAddress { - t.Fatalf("wrong account used for signing. expected %v got %v", ethAddress, mock.signedAccount.Address) - } - - if mock.signedMimeType != accounts.MimetypeTextPlain { - t.Fatalf("wrong mime type used for signing. expected %v got %v", accounts.MimetypeTextPlain, mock.signedMimeType) - } - - if !bytes.Equal(mock.signedData, clef.ClefRecoveryMessage) { - t.Fatalf("wrong data used for signing. expected %v got %v", clef.ClefRecoveryMessage, mock.signedData) - } - - signerPublicKey, err := signer.PublicKey() - if err != nil { - t.Fatal(err) - } - - if signerPublicKey != publicKey { - t.Fatalf("wrong public key. 
expected %v got %v", publicKey, signerPublicKey) - } -} - -func TestNewClefSignerSpecificAccount(t *testing.T) { - t.Parallel() - - ethAddress := common.HexToAddress("0x31415b599f636129AD03c196cef9f8f8b184D5C7") - wantedAddress := common.HexToAddress("0x41415b599f636129AD03c196cef9f8f8b184D5C7") - testSignature := make([]byte, 65) - - key, err := crypto.GenerateSecp256k1Key() - if err != nil { - t.Fatal(err) - } - publicKey := &key.PublicKey - - mock := &mockClef{ - accounts: []accounts.Account{ - { - Address: ethAddress, - }, - { - Address: wantedAddress, - }, - }, - signature: testSignature, - } - - signer, err := clef.NewSigner(mock, nil, func(signature, data []byte) (*ecdsa.PublicKey, error) { - if !bytes.Equal(testSignature, signature) { - t.Fatalf("wrong data used for recover. expected %v got %v", testSignature, signature) - } - - if !bytes.Equal(clef.ClefRecoveryMessage, data) { - t.Fatalf("wrong data used for recover. expected %v got %v", clef.ClefRecoveryMessage, data) - } - return publicKey, nil - }, &wantedAddress) - if err != nil { - t.Fatal(err) - } - - if mock.signedAccount.Address != wantedAddress { - t.Fatalf("wrong account used for signing. expected %v got %v", wantedAddress, mock.signedAccount.Address) - } - - if mock.signedMimeType != accounts.MimetypeTextPlain { - t.Fatalf("wrong mime type used for signing. expected %v got %v", accounts.MimetypeTextPlain, mock.signedMimeType) - } - - if !bytes.Equal(mock.signedData, clef.ClefRecoveryMessage) { - t.Fatalf("wrong data used for signing. expected %v got %v", clef.ClefRecoveryMessage, mock.signedData) - } - - signerPublicKey, err := signer.PublicKey() - if err != nil { - t.Fatal(err) - } - - if signerPublicKey != publicKey { - t.Fatalf("wrong public key. 
expected %v got %v", publicKey, signerPublicKey) - } -} - -func TestNewClefSignerAccountUnavailable(t *testing.T) { - t.Parallel() - - ethAddress := common.HexToAddress("0x31415b599f636129AD03c196cef9f8f8b184D5C7") - wantedAddress := common.HexToAddress("0x41415b599f636129AD03c196cef9f8f8b184D5C7") - - mock := &mockClef{ - accounts: []accounts.Account{ - { - Address: ethAddress, - }, - }, - } - - _, err := clef.NewSigner(mock, nil, func(signature, data []byte) (*ecdsa.PublicKey, error) { - return nil, errors.New("called sign") - }, &wantedAddress) - if !errors.Is(err, clef.ErrAccountNotAvailable) { - t.Fatalf("expected account to be not available. got error %v", err) - } -} - -func TestClefNoAccounts(t *testing.T) { - t.Parallel() - - mock := &mockClef{ - accounts: []accounts.Account{}, - } - - _, err := clef.NewSigner(mock, nil, nil, nil) - if err == nil { - t.Fatal("expected ErrNoAccounts error if no accounts") - } - if !errors.Is(err, clef.ErrNoAccounts) { - t.Fatalf("expected ErrNoAccounts error but got %v", err) - } -} - -type mockRpc struct { - call func(result interface{}, method string, args ...interface{}) error -} - -func (m *mockRpc) Call(result interface{}, method string, args ...interface{}) error { - return m.call(result, method, args...) -} - -func TestClefTypedData(t *testing.T) { - t.Parallel() - - key, err := crypto.GenerateSecp256k1Key() - if err != nil { - t.Fatal(err) - } - publicKey := &key.PublicKey - signature := common.FromHex("0xabcdef") - - account := common.HexToAddress("21b26864067deb88e2d5cdca512167815f2910d3") - - typedData := &eip712.TypedData{ - PrimaryType: "MyType", - } - - signer, err := clef.NewSigner(&mockClef{ - accounts: []accounts.Account{ - { - Address: account, - }, - }, - signature: make([]byte, 65), - }, &mockRpc{ - call: func(result interface{}, method string, args ...interface{}) error { - if method != "account_signTypedData" { - t.Fatalf("called wrong method. 
was %s", method) - } - if args[0].(common.Address) != account { - t.Fatalf("called with wrong account. was %x, wanted %x", args[0].(common.Address), account) - } - if args[1].(*eip712.TypedData) != typedData { - t.Fatal("called with wrong data") - } - *result.(*hexutil.Bytes) = signature - return nil - }, - }, func(signature, data []byte) (*ecdsa.PublicKey, error) { - return publicKey, nil - }, nil) - if err != nil { - t.Fatal(err) - } - - s, err := signer.SignTypedData(typedData) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(s, signature) { - t.Fatalf("wrong signature. wanted %x, got %x", signature, s) - } -} diff --git a/pkg/crypto/clef/export_test.go b/pkg/crypto/clef/export_test.go deleted file mode 100644 index d879bf90803..00000000000 --- a/pkg/crypto/clef/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2020 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package clef - -var ( - ClefRecoveryMessage = clefRecoveryMessage -) diff --git a/pkg/crypto/dh.go b/pkg/crypto/dh.go index f7301d3c8bc..2d5b4e695fc 100644 --- a/pkg/crypto/dh.go +++ b/pkg/crypto/dh.go @@ -11,7 +11,6 @@ import ( // DH is an interface allowing to generate shared keys for public key // using a salt from a known private key -// TODO: implement clef support beside in-memory type DH interface { SharedKey(public *ecdsa.PublicKey, salt []byte) ([]byte, error) } diff --git a/pkg/feeds/epochs/finder.go b/pkg/feeds/epochs/finder.go index a85ab309e76..d5133fdaa57 100644 --- a/pkg/feeds/epochs/finder.go +++ b/pkg/feeds/epochs/finder.go @@ -51,10 +51,7 @@ func (f *finder) common(ctx context.Context, at int64, after uint64) (*epoch, sw } return e, nil, err } - ts, err := feeds.UpdatedAt(ch) - if err != nil { - return e, nil, err - } + ts := e.length() * e.start if ts <= uint64(at) { return e, ch, nil } @@ -78,10 +75,7 @@ func (f *finder) at(ctx context.Context, at uint64, e *epoch, ch 
swarm.Chunk) (s } // epoch found // check if timestamp is later then target - ts, err := feeds.UpdatedAt(uch) - if err != nil { - return nil, err - } + ts := e.length() * e.start if ts > at { if e.isLeft() { return ch, nil @@ -131,10 +125,7 @@ func (f *asyncFinder) get(ctx context.Context, at int64, e *epoch) (swarm.Chunk, } return nil, nil } - ts, err := feeds.UpdatedAt(u) - if err != nil { - return nil, err - } + ts := e.length() * e.start diff := at - int64(ts) if diff < 0 { return nil, nil diff --git a/pkg/feeds/epochs/updater.go b/pkg/feeds/epochs/updater.go index b36d77e7d96..f3588b98f1b 100644 --- a/pkg/feeds/epochs/updater.go +++ b/pkg/feeds/epochs/updater.go @@ -34,7 +34,7 @@ func NewUpdater(putter storage.Putter, signer crypto.Signer, topic []byte) (feed // Update pushes an update to the feed through the chunk stores func (u *updater) Update(ctx context.Context, at int64, payload []byte) error { e := next(u.epoch, u.last, uint64(at)) - err := u.Put(ctx, e, at, payload) + err := u.Put(ctx, e, payload) if err != nil { return err } diff --git a/pkg/feeds/feed.go b/pkg/feeds/feed.go index 7c9495cdfac..ac8d232f5ce 100644 --- a/pkg/feeds/feed.go +++ b/pkg/feeds/feed.go @@ -107,7 +107,7 @@ func NewUpdate(f *Feed, idx Index, timestamp int64, payload, sig []byte) (swarm. 
if err != nil { return nil, fmt.Errorf("update: %w", err) } - cac, err := toChunk(uint64(timestamp), payload) + cac, err := toChunk(payload) if err != nil { return nil, fmt.Errorf("toChunk: %w", err) } diff --git a/pkg/feeds/getter.go b/pkg/feeds/getter.go index 77b7599dd9e..9fcb2ccae77 100644 --- a/pkg/feeds/getter.go +++ b/pkg/feeds/getter.go @@ -16,6 +16,8 @@ import ( "github.com/ethersphere/bee/v2/pkg/swarm" ) +var errNotLegacyPayload = errors.New("feed update is not in the legacy payload structure") + // Lookup is the interface for time based feed lookup type Lookup interface { At(ctx context.Context, at int64, after uint64) (chunk swarm.Chunk, currentIndex, nextIndex Index, err error) @@ -49,31 +51,47 @@ func (f *Getter) Get(ctx context.Context, i Index) (swarm.Chunk, error) { return f.getter.Get(ctx, addr) } -// FromChunk parses out the timestamp and the payload -func FromChunk(ch swarm.Chunk) (uint64, []byte, error) { - s, err := soc.FromChunk(ch) +func GetWrappedChunk(ctx context.Context, getter storage.Getter, ch swarm.Chunk) (swarm.Chunk, error) { + wc, err := FromChunk(ch) + if err != nil { + return nil, err + } + // try to split the timestamp and reference + // possible values right now: + // unencrypted ref: span+timestamp+ref => 8+8+32=48 + // encrypted ref: span+timestamp+ref+decryptKey => 8+8+64=80 + _, ref, err := LegacyPayload(wc) if err != nil { - return 0, nil, err + if errors.Is(err, errNotLegacyPayload) { + return wc, nil + } + return nil, err } - cac := s.WrappedChunk() - if len(cac.Data()) < 16 { - return 0, nil, errors.New("feed update payload too short") + wc, err = getter.Get(ctx, ref) + if err != nil { + return nil, err } - payload := cac.Data()[16:] - at := binary.BigEndian.Uint64(cac.Data()[8:16]) - return at, payload, nil + + return wc, nil } -// UpdatedAt extracts the time of feed other than update -func UpdatedAt(ch swarm.Chunk) (uint64, error) { - d := ch.Data() - if len(d) < 113 { - return 0, fmt.Errorf("too short: %d", len(d)) 
+// FromChunk parses out the wrapped chunk +func FromChunk(ch swarm.Chunk) (swarm.Chunk, error) { + s, err := soc.FromChunk(ch) + if err != nil { + return nil, fmt.Errorf("soc unmarshal: %w", err) } - // a soc chunk with time information in the wrapped content addressed chunk - // 0-32 index, - // 65-97 signature, - // 98-105 span of wrapped chunk - // 105-113 timestamp - return binary.BigEndian.Uint64(d[105:113]), nil + return s.WrappedChunk(), nil +} + +// LegacyPayload returns back the referenced chunk and datetime from the legacy feed payload +func LegacyPayload(wrappedChunk swarm.Chunk) (uint64, swarm.Address, error) { + cacData := wrappedChunk.Data() + if !(len(cacData) == 16+swarm.HashSize || len(cacData) == 16+swarm.HashSize*2) { + return 0, swarm.ZeroAddress, errNotLegacyPayload + } + address := swarm.NewAddress(cacData[16:]) + at := binary.BigEndian.Uint64(cacData[8:16]) + + return at, address, nil } diff --git a/pkg/feeds/putter.go b/pkg/feeds/putter.go index 633276f8f63..abe1972a0a5 100644 --- a/pkg/feeds/putter.go +++ b/pkg/feeds/putter.go @@ -6,7 +6,6 @@ package feeds import ( "context" - "encoding/binary" "github.com/ethersphere/bee/v2/pkg/cac" "github.com/ethersphere/bee/v2/pkg/crypto" @@ -39,12 +38,12 @@ func NewPutter(putter storage.Putter, signer crypto.Signer, topic []byte) (*Putt } // Put pushes an update to the feed through the chunk stores -func (u *Putter) Put(ctx context.Context, i Index, at int64, payload []byte) error { +func (u *Putter) Put(ctx context.Context, i Index, payload []byte) error { id, err := u.Feed.Update(i).Id() if err != nil { return err } - cac, err := toChunk(uint64(at), payload) + cac, err := toChunk(payload) if err != nil { return err } @@ -56,8 +55,6 @@ func (u *Putter) Put(ctx context.Context, i Index, at int64, payload []byte) err return u.putter.Put(ctx, ch) } -func toChunk(at uint64, payload []byte) (swarm.Chunk, error) { - ts := make([]byte, 8) - binary.BigEndian.PutUint64(ts, at) - return cac.New(append(ts, 
payload...)) +func toChunk(payload []byte) (swarm.Chunk, error) { + return cac.New(payload) } diff --git a/pkg/feeds/sequence/sequence.go b/pkg/feeds/sequence/sequence.go index 5361086de4b..5184885f1ab 100644 --- a/pkg/feeds/sequence/sequence.go +++ b/pkg/feeds/sequence/sequence.go @@ -79,14 +79,6 @@ func (f *finder) At(ctx context.Context, at int64, _ uint64) (ch swarm.Chunk, cu } return ch, current, &index{i}, nil } - ts, err := feeds.UpdatedAt(u) - if err != nil { - return nil, nil, nil, err - } - // if index is later than the `at` target index, then return previous chunk and index - if ts > uint64(at) { - return ch, &index{i - 1}, &index{i}, nil - } ch = u } } @@ -267,15 +259,6 @@ func (f *asyncFinder) get(ctx context.Context, at int64, idx uint64) (swarm.Chun // if 'not-found' error, then just silence and return nil chunk return nil, nil } - ts, err := feeds.UpdatedAt(u) - if err != nil { - return nil, err - } - // this means the update timestamp is later than the pivot time we are looking for - // handled as if the update was missing but with no uncertainty due to timeout - if at < int64(ts) { - return nil, nil - } return u, nil } @@ -297,7 +280,7 @@ func NewUpdater(putter storage.Putter, signer crypto.Signer, topic []byte) (feed // Update pushes an update to the feed through the chunk stores func (u *updater) Update(ctx context.Context, at int64, payload []byte) error { - err := u.Put(ctx, &index{u.next}, at, payload) + err := u.Put(ctx, &index{u.next}, payload) if err != nil { return err } diff --git a/pkg/feeds/testing/lookup.go b/pkg/feeds/testing/lookup.go index 8c71098f5a6..8fd852888c8 100644 --- a/pkg/feeds/testing/lookup.go +++ b/pkg/feeds/testing/lookup.go @@ -77,24 +77,22 @@ func TestFinderBasic(t *testing.T, finderf func(storage.Getter, *feeds.Feed) fee if err != nil { t.Fatal(err) } - ch, err := feeds.Latest(ctx, finder, 0) + soc, err := feeds.Latest(ctx, finder, 0) if err != nil { t.Fatal(err) } - if ch == nil { + if soc == nil { 
t.Fatalf("expected to find update, got none") } exp := payload - ts, payload, err := feeds.FromChunk(ch) + cac, err := feeds.FromChunk(soc) if err != nil { t.Fatal(err) } + payload = cac.Data()[swarm.SpanSize:] if !bytes.Equal(payload, exp) { t.Fatalf("result mismatch. want %8x... got %8x...", exp, payload) } - if ts != uint64(at) { - t.Fatalf("timestamp mismatch: expected %v, got %v", at, ts) - } }) } @@ -157,18 +155,6 @@ func TestFinderIntervals(t *testing.T, nextf func() (bool, int64), finderf func( if ch == nil { t.Fatalf("expected to find update, got none") } - ts, payload, err := feeds.FromChunk(ch) - if err != nil { - t.Fatal(err) - } - content := binary.BigEndian.Uint64(payload) - if content != uint64(at) { - t.Fatalf("payload mismatch: expected %v, got %v", at, content) - } - - if ts != uint64(at) { - t.Fatalf("timestamp mismatch: expected %v, got %v", at, ts) - } if current != nil { expectedId := ch.Data()[:32] diff --git a/pkg/file/joiner/joiner.go b/pkg/file/joiner/joiner.go index fcd7e790c10..c3a317854bc 100644 --- a/pkg/file/joiner/joiner.go +++ b/pkg/file/joiner/joiner.go @@ -116,6 +116,12 @@ func New(ctx context.Context, g storage.Getter, putter storage.Putter, address s return nil, 0, err } + return NewJoiner(ctx, g, putter, address, rootChunk) +} + +// NewJoiner creates a new Joiner with the already fetched root chunk. +// A Joiner provides Read, Seek and Size functionalities. 
+func NewJoiner(ctx context.Context, g storage.Getter, putter storage.Putter, address swarm.Address, rootChunk swarm.Chunk) (file.Joiner, int64, error) { chunkData := rootChunk.Data() rootData := chunkData[swarm.SpanSize:] refLength := len(address.Bytes()) diff --git a/pkg/file/loadsave/loadsave.go b/pkg/file/loadsave/loadsave.go index 6a2a0bbf782..2899d9ed6ab 100644 --- a/pkg/file/loadsave/loadsave.go +++ b/pkg/file/loadsave/loadsave.go @@ -29,6 +29,7 @@ type loadSave struct { getter storage.Getter putter storage.Putter pipelineFn func() pipeline.Interface + rootCh swarm.Chunk } // New returns a new read-write load-saver. @@ -48,14 +49,33 @@ func NewReadonly(getter storage.Getter) file.LoadSaver { } } +// NewReadonlyWithRootCh returns a new read-only load-saver +// which will error on write. +func NewReadonlyWithRootCh(getter storage.Getter, rootCh swarm.Chunk) file.LoadSaver { + return &loadSave{ + getter: getter, + rootCh: rootCh, + } +} + func (ls *loadSave) Load(ctx context.Context, ref []byte) ([]byte, error) { - j, _, err := joiner.New(ctx, ls.getter, ls.putter, swarm.NewAddress(ref)) - if err != nil { - return nil, err + var j file.Joiner + if ls.rootCh == nil || !bytes.Equal(ls.rootCh.Address().Bytes(), ref[:swarm.HashSize]) { + joiner, _, err := joiner.New(ctx, ls.getter, ls.putter, swarm.NewAddress(ref)) + if err != nil { + return nil, err + } + j = joiner + } else { + joiner, _, err := joiner.NewJoiner(ctx, ls.getter, ls.putter, swarm.NewAddress(ref), ls.rootCh) + if err != nil { + return nil, err + } + j = joiner } buf := bytes.NewBuffer(nil) - _, err = file.JoinReadAll(ctx, j, buf) + _, err := file.JoinReadAll(ctx, j, buf) if err != nil { return nil, err } diff --git a/pkg/node/bootstrap.go b/pkg/node/bootstrap.go index 41e0c0aae18..2ccc0d8972b 100644 --- a/pkg/node/bootstrap.go +++ b/pkg/node/bootstrap.go @@ -196,10 +196,10 @@ func bootstrapNode( logger.Info("bootstrap: trying to fetch stamps snapshot") var ( - snapshotReference swarm.Address - 
reader file.Joiner - l int64 - eventsJSON []byte + snapshotRootCh swarm.Chunk + reader file.Joiner + l int64 + eventsJSON []byte ) for i := 0; i < getSnapshotRetries; i++ { @@ -210,7 +210,7 @@ func bootstrapNode( ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - snapshotReference, err = getLatestSnapshot(ctx, localStore.Download(true), snapshotFeed) + snapshotRootCh, err = getLatestSnapshot(ctx, localStore.Download(true), snapshotFeed) if err != nil { logger.Warning("bootstrap: fetching snapshot failed", "error", err) continue @@ -229,7 +229,7 @@ func bootstrapNode( ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - reader, l, err = joiner.New(ctx, localStore.Download(true), localStore.Cache(), snapshotReference) + reader, l, err = joiner.NewJoiner(ctx, localStore.Download(true), localStore.Cache(), snapshotRootCh.Address(), snapshotRootCh) if err != nil { logger.Warning("bootstrap: file joiner failed", "error", err) continue @@ -278,7 +278,7 @@ func getLatestSnapshot( ctx context.Context, st storage.Getter, address swarm.Address, -) (swarm.Address, error) { +) (swarm.Chunk, error) { ls := loadsave.NewReadonly(st) feedFactory := factory.New(st) @@ -287,12 +287,12 @@ func getLatestSnapshot( ls, ) if err != nil { - return swarm.ZeroAddress, fmt.Errorf("not a manifest: %w", err) + return nil, fmt.Errorf("not a manifest: %w", err) } e, err := m.Lookup(ctx, "/") if err != nil { - return swarm.ZeroAddress, fmt.Errorf("node lookup: %w", err) + return nil, fmt.Errorf("node lookup: %w", err) } var ( @@ -303,42 +303,37 @@ func getLatestSnapshot( if e := meta["swarm-feed-owner"]; e != "" { owner, err = hex.DecodeString(e) if err != nil { - return swarm.ZeroAddress, err + return nil, err } } if e := meta["swarm-feed-topic"]; e != "" { topic, err = hex.DecodeString(e) if err != nil { - return swarm.ZeroAddress, err + return nil, err } } if e := meta["swarm-feed-type"]; e != "" { err := t.FromString(e) if err != nil { - return swarm.ZeroAddress, 
err + return nil, err } } if len(owner) == 0 || len(topic) == 0 { - return swarm.ZeroAddress, fmt.Errorf("node lookup: %s", "feed metadata absent") + return nil, fmt.Errorf("node lookup: %s", "feed metadata absent") } f := feeds.New(topic, common.BytesToAddress(owner)) l, err := feedFactory.NewLookup(*t, f) if err != nil { - return swarm.ZeroAddress, fmt.Errorf("feed lookup failed: %w", err) + return nil, fmt.Errorf("feed lookup failed: %w", err) } u, _, _, err := l.At(ctx, time.Now().Unix(), 0) if err != nil { - return swarm.ZeroAddress, err - } - - _, ref, err := feeds.FromChunk(u) - if err != nil { - return swarm.ZeroAddress, err + return nil, err } - return swarm.NewAddress(ref), nil + return feeds.GetWrappedChunk(ctx, st, u) } func batchStoreExists(s storage.StateStorer) (bool, error) { diff --git a/pkg/node/node.go b/pkg/node/node.go index fbe7ffaccc5..ec855d55fce 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -173,6 +173,7 @@ type Options struct { WhitelistedWithdrawalAddress []string TrxDebugMode bool MinimumStorageRadius uint + ReserveCapacityDoubling int } const ( @@ -185,9 +186,7 @@ const ( minPaymentThreshold = 2 * refreshRate // minimal accepted payment threshold of full nodes maxPaymentThreshold = 24 * refreshRate // maximal accepted payment threshold of full nodes mainnetNetworkID = uint64(1) // - ReserveCapacity = 4_194_304 // 2^22 chunks reserveWakeUpDuration = 15 * time.Minute // time to wait before waking up reserveWorker - reserveTreshold = ReserveCapacity * 5 / 10 reserveMinEvictCount = 1_000 cacheMinEvictCount = 10_000 ) @@ -250,6 +249,12 @@ func NewBee( } }(b) + if o.ReserveCapacityDoubling < 0 || o.ReserveCapacityDoubling > 1 { + return nil, fmt.Errorf("config reserve capacity doubling has to be between default: 0 and maximum: 1") + } + + reserveCapacity := (1 << o.ReserveCapacityDoubling) * storer.DefaultReserveCapacity + stateStore, stateStoreMetrics, err := InitStateStore(logger, o.DataDir, o.StatestoreCacheCapacity) if err != 
nil { return nil, err @@ -361,7 +366,7 @@ func NewBee( func(id []byte) error { return evictFn(id) }, - ReserveCapacity, + reserveCapacity, logger, ) if err != nil { @@ -449,6 +454,8 @@ func NewBee( apiService.MountTechnicalDebug() apiService.SetProbe(probe) + apiService.SetSwarmAddress(&swarmAddress) + apiServer := &http.Server{ IdleTimeout: 30 * time.Second, ReadHeaderTimeout: 3 * time.Second, @@ -483,19 +490,19 @@ func NewBee( } } - if o.SwapEnable { - chequebookFactory, err = InitChequebookFactory(logger, chainBackend, chainID, transactionService, o.SwapFactoryAddress) - if err != nil { - return nil, err - } + chequebookFactory, err = InitChequebookFactory(logger, chainBackend, chainID, transactionService, o.SwapFactoryAddress) + if err != nil { + return nil, err + } - erc20Address, err := chequebookFactory.ERC20Address(ctx) - if err != nil { - return nil, fmt.Errorf("factory fail: %w", err) - } + erc20Address, err := chequebookFactory.ERC20Address(ctx) + if err != nil { + return nil, fmt.Errorf("factory fail: %w", err) + } - erc20Service = erc20.New(transactionService, erc20Address) + erc20Service = erc20.New(transactionService, erc20Address) + if o.SwapEnable { if o.ChequebookEnable && chainEnabled { chequebookService, err = InitChequebookService( ctx, @@ -723,10 +730,11 @@ func NewBee( if o.FullNodeMode && !o.BootnodeMode { // configure reserve only for full node - lo.ReserveCapacity = ReserveCapacity + lo.ReserveCapacity = reserveCapacity lo.ReserveWakeUpDuration = reserveWakeUpDuration lo.ReserveMinEvictCount = reserveMinEvictCount lo.RadiusSetter = kad + lo.ReserveCapacityDoubling = o.ReserveCapacityDoubling } localStore, err := storer.New(ctx, path, lo) @@ -900,7 +908,7 @@ func NewBee( return nil, fmt.Errorf("status service: %w", err) } - saludService := salud.New(nodeStatus, kad, localStore, logger, warmupTime, api.FullMode.String(), salud.DefaultMinPeersPerBin, salud.DefaultDurPercentile, salud.DefaultConnsPercentile) + saludService := 
salud.New(nodeStatus, kad, localStore, logger, warmupTime, api.FullMode.String(), salud.DefaultMinPeersPerBin, salud.DefaultDurPercentile, salud.DefaultConnsPercentile, uint8(o.ReserveCapacityDoubling)) b.saludCloser = saludService rC, unsub := saludService.SubscribeNetworkStorageRadius() @@ -944,7 +952,7 @@ func NewBee( } } - pushSyncProtocol := pushsync.New(swarmAddress, networkID, nonce, p2ps, localStore, waitNetworkRFunc, kad, o.FullNodeMode, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, acc, pricer, signer, tracer, warmupTime) + pushSyncProtocol := pushsync.New(swarmAddress, networkID, nonce, p2ps, localStore, waitNetworkRFunc, kad, o.FullNodeMode && !o.BootnodeMode, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, acc, pricer, signer, tracer, warmupTime) b.pushSyncCloser = pushSyncProtocol // set the pushSyncer in the PSS @@ -996,20 +1004,46 @@ func NewBee( stakingContractAddress = common.HexToAddress(o.StakingContractAddress) } - stakingContract := staking.New(overlayEthAddress, stakingContractAddress, abiutil.MustParseABI(chainCfg.StakingABI), bzzTokenAddress, transactionService, common.BytesToHash(nonce), o.TrxDebugMode) + stakingContract := staking.New(overlayEthAddress, stakingContractAddress, abiutil.MustParseABI(chainCfg.StakingABI), bzzTokenAddress, transactionService, common.BytesToHash(nonce), o.TrxDebugMode, uint8(o.ReserveCapacityDoubling)) - if chainEnabled && changedOverlay { - stake, err := stakingContract.GetPotentialStake(ctx) + if chainEnabled { + + if changedOverlay { + stake, err := stakingContract.GetPotentialStake(ctx) + if err != nil { + return nil, err + } + if stake.Cmp(big.NewInt(0)) > 0 { + logger.Debug("changing overlay address in staking contract") + tx, err := stakingContract.ChangeStakeOverlay(ctx, common.BytesToHash(nonce)) + if err != nil { + return nil, fmt.Errorf("cannot change staking overlay address: %v", err.Error()) + } + logger.Info("overlay address changed in staking contract", 
"transaction", tx) + } + } + + // make sure that the staking contract has the up to date height + tx, updated, err := stakingContract.UpdateHeight(ctx) if err != nil { - return nil, errors.New("getting stake balance") + return nil, err } - if stake.Cmp(big.NewInt(0)) > 0 { - logger.Debug("changing overlay address in staking contract") - tx, err := stakingContract.ChangeStakeOverlay(ctx, common.BytesToHash(nonce)) + if updated { + logger.Info("updated new reserve capacity doubling height in the staking contract", "transaction", tx, "new_height", o.ReserveCapacityDoubling) + } + + if o.ReserveCapacityDoubling > 0 { + stake, err := stakingContract.GetPotentialStake(ctx) if err != nil { - return nil, fmt.Errorf("cannot change staking overlay address: %v", err.Error()) + return nil, err + } + if stake.Cmp(big.NewInt(0)) > 0 { + // Check if the staked amount is sufficient to cover the additional neighborhoods. + // The staked amount must be at least 2^h * MinimumStake. + if stake.Cmp(big.NewInt(0).Mul(big.NewInt(1<= reserveTreshold && pullerService.SyncRate() == 0 } @@ -1058,6 +1093,7 @@ func NewBee( transactionService, saludService, logger, + uint8(o.ReserveCapacityDoubling), ) if err != nil { return nil, fmt.Errorf("storage incentives agent: %w", err) @@ -1154,7 +1190,6 @@ func NewBee( apiService.MountDebug() apiService.MountAPI() - apiService.SetSwarmAddress(&swarmAddress) apiService.SetRedistributionAgent(agent) } diff --git a/pkg/postage/listener/listener.go b/pkg/postage/listener/listener.go index 39b6532d8fe..9750c09b7e3 100644 --- a/pkg/postage/listener/listener.go +++ b/pkg/postage/listener/listener.go @@ -329,7 +329,7 @@ func (l *listener) Listen(ctx context.Context, from uint64, updater postage.Even events, err := l.ev.FilterLogs(ctx, l.filterQuery(big.NewInt(int64(from)), big.NewInt(int64(to)))) if err != nil { l.metrics.BackendErrors.Inc() - l.logger.Warning("could not get logs", "error", err) + l.logger.Warning("could not get blockchain log", "error", err) 
lastConfirmedBlock = 0 continue } diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go index 36a0be87ea4..a917169e0d6 100644 --- a/pkg/pullsync/pullsync.go +++ b/pkg/pullsync/pullsync.go @@ -51,7 +51,7 @@ const ( pageTimeout = time.Second makeOfferTimeout = 15 * time.Minute handleMaxChunksPerSecond = 250 - handleRequestsLimitRate = time.Second / handleMaxChunksPerSecond // handle max 100 chunks per second per peer + handleRequestsLimitRate = time.Second / handleMaxChunksPerSecond // handle max `handleMaxChunksPerSecond` chunks per second per peer ) // Interface is the PullSync interface. diff --git a/pkg/salud/salud.go b/pkg/salud/salud.go index d0ada7f74d3..397362499b4 100644 --- a/pkg/salud/salud.go +++ b/pkg/salud/salud.go @@ -40,11 +40,6 @@ type peerStatus interface { PeerSnapshot(ctx context.Context, peer swarm.Address) (*status.Snapshot, error) } -type reserve interface { - storer.RadiusChecker - ReserveSize() int -} - type service struct { wg sync.WaitGroup quit chan struct{} @@ -53,34 +48,38 @@ type service struct { status peerStatus metrics metrics isSelfHealthy *atomic.Bool - reserve reserve + reserve storer.RadiusChecker radiusSubsMtx sync.Mutex radiusC []chan uint8 + + capacityDoubling uint8 } func New( status peerStatus, topology topologyDriver, - reserve reserve, + reserve storer.RadiusChecker, logger log.Logger, warmup time.Duration, mode string, minPeersPerbin int, durPercentile float64, connsPercentile float64, + capacityDoubling uint8, ) *service { metrics := newMetrics() s := &service{ - quit: make(chan struct{}), - logger: logger.WithName(loggerName).Register(), - status: status, - topology: topology, - metrics: metrics, - isSelfHealthy: atomic.NewBool(true), - reserve: reserve, + quit: make(chan struct{}), + logger: logger.WithName(loggerName).Register(), + status: status, + topology: topology, + metrics: metrics, + isSelfHealthy: atomic.NewBool(true), + reserve: reserve, + capacityDoubling: capacityDoubling, } s.wg.Add(1) @@ -200,7 
+199,7 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, continue } - if networkRadius > 0 && peer.status.StorageRadius < uint32(networkRadius-1) { + if networkRadius > 0 && peer.status.StorageRadius < uint32(networkRadius-2) { s.logger.Debug("radius health failure", "radius", peer.status.StorageRadius, "peer_address", peer.addr) } else if peer.dur.Seconds() > pDur { s.logger.Debug("response duration below threshold", "duration", peer.dur, "peer_address", peer.addr) @@ -221,10 +220,12 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, } } + networkRadiusEstimation := s.reserve.StorageRadius() + s.capacityDoubling + selfHealth := true - if nHoodRadius == networkRadius && s.reserve.StorageRadius() != networkRadius { + if nHoodRadius == networkRadius && networkRadiusEstimation != networkRadius { selfHealth = false - s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", s.reserve.StorageRadius(), "network_radius", networkRadius) + s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", networkRadiusEstimation, "network_radius", networkRadius) } s.isSelfHealthy.Store(selfHealth) diff --git a/pkg/salud/salud_test.go b/pkg/salud/salud_test.go index b408f06b95c..5fc4dda733d 100644 --- a/pkg/salud/salud_test.go +++ b/pkg/salud/salud_test.go @@ -17,6 +17,7 @@ import ( mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" "github.com/ethersphere/bee/v2/pkg/swarm" topMock "github.com/ethersphere/bee/v2/pkg/topology/mock" + "github.com/ethersphere/bee/v2/pkg/util/testutil" ) type peer struct { @@ -37,11 +38,11 @@ func TestSalud(t *testing.T) { {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, - // healthy 
since radius >= most common radius - 1 + // healthy since radius >= most common radius - 2 {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 7, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, // radius too low - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 6, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, false}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 5, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, false}, // dur too long {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 2, false}, @@ -69,7 +70,7 @@ func TestSalud(t *testing.T) { mockstorer.WithReserveSize(100), ) - service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8) + service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8, 0) err := spinlock.Wait(time.Minute, func() bool { return len(topM.PeersHealth()) == len(peers) @@ -115,7 +116,8 @@ func TestSelfUnhealthyRadius(t *testing.T) { mockstorer.WithReserveSize(100), ) - service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8) + service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8, 0) + testutil.CleanupCloser(t, service) err := spinlock.Wait(time.Minute, func() bool { return len(topM.PeersHealth()) == len(peers) @@ -127,10 +129,43 @@ func TestSelfUnhealthyRadius(t *testing.T) { if service.IsHealthy() { t.Fatalf("self should NOT be healthy") } +} - if err := service.Close(); err != nil { +func TestSelfHealthyCapacityDoubling(t *testing.T) { + t.Parallel() + peers := []peer{ + // fully healthy + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full"}, 0, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full"}, 0, true}, + } + + statusM := 
&statusMock{make(map[string]peer)} + addrs := make([]swarm.Address, 0, len(peers)) + for _, p := range peers { + addrs = append(addrs, p.addr) + statusM.peers[p.addr.ByteString()] = p + } + + topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...)) + + reserve := mockstorer.NewReserve( + mockstorer.WithRadius(6), + mockstorer.WithReserveSize(100), + ) + + service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8, 2) + testutil.CleanupCloser(t, service) + + err := spinlock.Wait(time.Minute, func() bool { + return len(topM.PeersHealth()) == len(peers) + }) + if err != nil { t.Fatal(err) } + + if !service.IsHealthy() { + t.Fatalf("self should be healthy") + } } func TestSubToRadius(t *testing.T) { @@ -148,7 +183,7 @@ func TestSubToRadius(t *testing.T) { topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...)) - service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8) + service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8, 0) c, unsub := service.SubscribeNetworkStorageRadius() t.Cleanup(unsub) @@ -181,7 +216,8 @@ func TestUnsub(t *testing.T) { topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...)) - service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8) + service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8, 0) + testutil.CleanupCloser(t, service) c, unsub := service.SubscribeNetworkStorageRadius() unsub() @@ -191,10 +227,6 @@ func TestUnsub(t *testing.T) { t.Fatal("should not have received an address") case <-time.After(time.Second): } - - if err := service.Close(); err != nil { - t.Fatal(err) - } } type statusMock struct { diff --git a/pkg/soc/testing/soc.go b/pkg/soc/testing/soc.go index a622bf2d1dc..b6f7e2a7176 100644 --- a/pkg/soc/testing/soc.go +++ 
b/pkg/soc/testing/soc.go @@ -70,17 +70,35 @@ func GenerateMockSocWithSigner(t *testing.T, data []byte, signer crypto.Signer) func GenerateMockSOC(t *testing.T, data []byte) *MockSOC { t.Helper() - privKey, err := crypto.GenerateSecp256k1Key() + ch, err := cac.New(data) if err != nil { t.Fatal(err) } - signer := crypto.NewDefaultSigner(privKey) - owner, err := signer.EthereumAddress() + + return generateMockSOC(t, ch) +} + +// GenerateMockSOC generates a valid mocked SOC from given chunk data (span + payload). +func GenerateMockSOCWithSpan(t *testing.T, data []byte) *MockSOC { + t.Helper() + + ch, err := cac.NewWithDataSpan(data) if err != nil { t.Fatal(err) } - ch, err := cac.New(data) + return generateMockSOC(t, ch) +} + +func generateMockSOC(t *testing.T, ch swarm.Chunk) *MockSOC { + t.Helper() + + privKey, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatal(err) + } + signer := crypto.NewDefaultSigner(privKey) + owner, err := signer.EthereumAddress() if err != nil { t.Fatal(err) } diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index b71a77c3dff..afd4614b4e3 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -70,6 +70,7 @@ type Agent struct { chainStateGetter postage.ChainStateGetter commitLock sync.Mutex health Health + capacityDoubling uint8 } func New(overlay swarm.Address, @@ -89,6 +90,7 @@ func New(overlay swarm.Address, tranService transaction.Service, health Health, logger log.Logger, + capacityDoubling uint8, ) (*Agent, error) { a := &Agent{ overlay: overlay, @@ -104,6 +106,7 @@ func New(overlay swarm.Address, redistributionStatuser: redistributionStatuser, health: health, chainStateGetter: chainStateGetter, + capacityDoubling: capacityDoubling, } state, err := NewRedistributionState(logger, ethAddress, stateStore, erc20Service, tranService) @@ -389,14 +392,15 @@ func (a *Agent) handleClaim(ctx context.Context, round uint64) error { } func (a *Agent) handleSample(ctx 
context.Context, round uint64) (bool, error) { - storageRadius := a.store.StorageRadius() + // minimum proximity between the anchor and the stored chunks + commitedDepth := a.store.StorageRadius() + a.capacityDoubling if a.state.IsFrozen() { a.logger.Info("skipping round because node is frozen") return false, nil } - isPlaying, err := a.contract.IsPlaying(ctx, storageRadius) + isPlaying, err := a.contract.IsPlaying(ctx, commitedDepth) if err != nil { a.metrics.ErrCheckIsPlaying.Inc() return false, err } @@ -429,21 +433,21 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) { } now := time.Now() - sample, err := a.makeSample(ctx, storageRadius) + sample, err := a.makeSample(ctx, commitedDepth) if err != nil { return false, err } dur := time.Since(now) a.metrics.SampleDuration.Set(dur.Seconds()) - a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", sample.StorageRadius, "round", round) + a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", commitedDepth, "round", round) a.state.SetSampleData(round, sample, dur) return true, nil } -func (a *Agent) makeSample(ctx context.Context, storageRadius uint8) (SampleData, error) { +func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData, error) { salt, err := a.contract.ReserveSalt(ctx) if err != nil { return SampleData{}, err } @@ -454,7 +458,7 @@ func (a *Agent) makeSample(ctx context.Context, storageRadius uint8) (SampleData return SampleData{}, err } - rSample, err := a.store.ReserveSample(ctx, salt, storageRadius, uint64(timeLimiter), a.minBatchBalance()) + rSample, err := a.store.ReserveSample(ctx, salt, commitedDepth, uint64(timeLimiter), a.minBatchBalance()) if err != nil { return SampleData{}, err } @@ -468,7 +472,7 @@ func (a *Agent) makeSample(ctx context.Context, storageRadius uint8) (SampleData Anchor1: salt, ReserveSampleItems: rSample.Items, ReserveSampleHash: sampleHash, - StorageRadius: storageRadius, + 
StorageRadius: commitedDepth, } return sample, nil diff --git a/pkg/storageincentives/agent_test.go b/pkg/storageincentives/agent_test.go index ae078f1a871..0ae0eda22f9 100644 --- a/pkg/storageincentives/agent_test.go +++ b/pkg/storageincentives/agent_test.go @@ -41,6 +41,7 @@ func TestAgent(t *testing.T) { limit uint64 expectedCalls bool balance *big.Int + doubling uint8 }{{ name: "3 blocks per phase, same block number returns twice", blocksPerRound: 9, @@ -49,6 +50,7 @@ func TestAgent(t *testing.T) { expectedCalls: true, limit: 108, // computed with blocksPerRound * (exptectedCalls + 2) balance: bigBalance, + doubling: 1, }, { name: "3 blocks per phase, block number returns every block", blocksPerRound: 9, @@ -57,6 +59,7 @@ func TestAgent(t *testing.T) { expectedCalls: true, limit: 108, balance: bigBalance, + doubling: 0, }, { name: "no expected calls - block number returns late after each phase", blocksPerRound: 9, @@ -65,6 +68,7 @@ func TestAgent(t *testing.T) { expectedCalls: false, limit: 108, balance: bigBalance, + doubling: 0, }, { name: "4 blocks per phase, block number returns every other block", blocksPerRound: 12, @@ -73,6 +77,7 @@ func TestAgent(t *testing.T) { expectedCalls: true, limit: 144, balance: bigBalance, + doubling: 1, }, { // This test case is based on previous, but this time agent will not have enough // balance to participate in the game so no calls are going to be made. 
@@ -83,6 +88,7 @@ func TestAgent(t *testing.T) { expectedCalls: false, limit: 144, balance: big.NewInt(0), + doubling: 1, }, } @@ -106,9 +112,12 @@ func TestAgent(t *testing.T) { block: tc.blocksPerRound, balance: tc.balance, } - contract := &mockContract{} - service, _ := createService(t, addr, backend, contract, tc.blocksPerRound, tc.blocksPerPhase) + var radius uint8 = 8 + + contract := &mockContract{t: t, expectedRadius: radius + tc.doubling} + + service, _ := createService(t, addr, backend, contract, tc.blocksPerRound, tc.blocksPerPhase, radius, tc.doubling) testutil.CleanupCloser(t, service) <-wait @@ -156,7 +165,10 @@ func createService( backend storageincentives.ChainBackend, contract redistribution.Contract, blocksPerRound uint64, - blocksPerPhase uint64) (*storageincentives.Agent, error) { + blocksPerPhase uint64, + radius uint8, + doubling uint8, +) (*storageincentives.Agent, error) { t.Helper() postageContract := contractMock.New(contractMock.WithExpiresBatchesFunc(func(context.Context) error { @@ -168,7 +180,7 @@ func createService( })) reserve := resMock.NewReserve( - resMock.WithRadius(0), + resMock.WithRadius(radius), resMock.WithSample(storer.RandSample(t, nil)), ) @@ -189,6 +201,7 @@ func createService( transactionmock.New(), &mockHealth{}, log.Noop, + doubling, ) } @@ -257,15 +270,20 @@ const ( ) type mockContract struct { - callsList []contractCall - mtx sync.Mutex + callsList []contractCall + mtx sync.Mutex + expectedRadius uint8 + t *testing.T } func (m *mockContract) ReserveSalt(context.Context) ([]byte, error) { return nil, nil } -func (m *mockContract) IsPlaying(context.Context, uint8) (bool, error) { +func (m *mockContract) IsPlaying(_ context.Context, r uint8) (bool, error) { + if r != m.expectedRadius { + m.t.Fatalf("isPlaying: expected radius %d, got %d", m.expectedRadius, r) + } return true, nil } @@ -290,9 +308,14 @@ func (m *mockContract) Commit(context.Context, []byte, uint64) (common.Hash, err return common.Hash{}, nil } -func (m 
*mockContract) Reveal(context.Context, uint8, []byte, []byte) (common.Hash, error) { +func (m *mockContract) Reveal(_ context.Context, r uint8, _ []byte, _ []byte) (common.Hash, error) { m.mtx.Lock() defer m.mtx.Unlock() + + if r != m.expectedRadius { + m.t.Fatalf("reveal: expected radius %d, got %d", m.expectedRadius, r) + } + m.callsList = append(m.callsList, revealCall) return common.Hash{}, nil } diff --git a/pkg/storageincentives/staking/contract.go b/pkg/storageincentives/staking/contract.go index 966efaee8c5..cf95baec874 100644 --- a/pkg/storageincentives/staking/contract.go +++ b/pkg/storageincentives/staking/contract.go @@ -44,6 +44,7 @@ type Contract interface { GetWithdrawableStake(ctx context.Context) (*big.Int, error) WithdrawStake(ctx context.Context) (common.Hash, error) MigrateStake(ctx context.Context) (common.Hash, error) + UpdateHeight(ctx context.Context) (common.Hash, bool, error) RedistributionStatuser } @@ -59,6 +60,7 @@ type contract struct { transactionService transaction.Service overlayNonce common.Hash gasLimit uint64 + height uint8 } func New( @@ -69,6 +71,7 @@ func New( transactionService transaction.Service, nonce common.Hash, setGasLimit bool, + height uint8, ) Contract { var gasLimit uint64 @@ -84,6 +87,7 @@ func New( transactionService: transactionService, overlayNonce: nonce, gasLimit: gasLimit, + height: height, } } @@ -113,7 +117,7 @@ func (c *contract) DepositStake(ctx context.Context, stakedAmount *big.Int) (com return common.Hash{}, err } - receipt, err := c.sendDepositStakeTransaction(ctx, stakedAmount, c.overlayNonce) + receipt, err := c.sendManageStakeTransaction(ctx, stakedAmount) if err != nil { return common.Hash{}, err } @@ -124,7 +128,7 @@ func (c *contract) DepositStake(ctx context.Context, stakedAmount *big.Int) (com // ChangeStakeOverlay only changes the overlay address used in the redistribution game. 
func (c *contract) ChangeStakeOverlay(ctx context.Context, nonce common.Hash) (common.Hash, error) { c.overlayNonce = nonce - receipt, err := c.sendDepositStakeTransaction(ctx, new(big.Int), c.overlayNonce) + receipt, err := c.sendManageStakeTransaction(ctx, new(big.Int)) if err != nil { return common.Hash{}, err } @@ -132,6 +136,26 @@ func (c *contract) ChangeStakeOverlay(ctx context.Context, nonce common.Hash) (c return receipt.TxHash, nil } +// UpdateHeight submits the reserve doubling height to the contract only if the height is a new value. +func (c *contract) UpdateHeight(ctx context.Context) (common.Hash, bool, error) { + + h, err := c.getHeight(ctx) + if err != nil { + return common.Hash{}, false, fmt.Errorf("staking contract: failed to read previous height: %w", err) + } + + if h == c.height { + return common.Hash{}, false, nil + } + + receipt, err := c.sendManageStakeTransaction(ctx, new(big.Int)) + if err != nil { + return common.Hash{}, false, fmt.Errorf("staking contract: failed to write new height: %w", err) + } + + return receipt.TxHash, true, nil +} + func (c *contract) GetPotentialStake(ctx context.Context) (*big.Int, error) { stakedAmount, err := c.getPotentialStake(ctx) if err != nil { @@ -292,8 +316,8 @@ func (c *contract) sendTransaction(ctx context.Context, callData []byte, desc st return receipt, nil } -func (c *contract) sendDepositStakeTransaction(ctx context.Context, stakedAmount *big.Int, nonce common.Hash) (*types.Receipt, error) { - callData, err := c.stakingContractABI.Pack("manageStake", nonce, stakedAmount) +func (c *contract) sendManageStakeTransaction(ctx context.Context, stakedAmount *big.Int) (*types.Receipt, error) { + callData, err := c.stakingContractABI.Pack("manageStake", c.overlayNonce, stakedAmount, c.height) if err != nil { return nil, err } @@ -439,3 +463,28 @@ func (c *contract) paused(ctx context.Context) (bool, error) { return results[0].(bool), nil } + +func (c *contract) getHeight(ctx context.Context) (uint8, error) 
{ + callData, err := c.stakingContractABI.Pack("heightOfAddress", c.owner) + if err != nil { + return 0, err + } + + result, err := c.transactionService.Call(ctx, &transaction.TxRequest{ + To: &c.stakingContractAddress, + Data: callData, + }) + if err != nil { + return 0, err + } + results, err := c.stakingContractABI.Unpack("heightOfAddress", result) + if err != nil { + return 0, err + } + + if len(results) == 0 { + return 0, errors.New("unexpected empty results") + } + + return results[0].(uint8), nil +} diff --git a/pkg/storageincentives/staking/contract_test.go b/pkg/storageincentives/staking/contract_test.go index 4fa0a36ff4e..080ed13c324 100644 --- a/pkg/storageincentives/staking/contract_test.go +++ b/pkg/storageincentives/staking/contract_test.go @@ -25,6 +25,8 @@ import ( var stakingContractABI = abiutil.MustParseABI(chaincfg.Testnet.StakingABI) +const stakingHeight = uint8(0) + func TestIsOverlayFrozen(t *testing.T) { t.Parallel() @@ -55,6 +57,7 @@ func TestIsOverlayFrozen(t *testing.T) { ), nonce, false, + stakingHeight, ) frozen, err := contract.IsOverlayFrozen(ctx, uint64(height-1)) @@ -95,7 +98,7 @@ func TestDepositStake(t *testing.T) { totalAmount := big.NewInt(100000000000000000) prevStake := big.NewInt(0) - expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount) + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, stakingHeight) if err != nil { t.Fatal(err) } @@ -143,6 +146,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.DepositStake(ctx, stakedAmount) @@ -156,7 +160,7 @@ func TestDepositStake(t *testing.T) { totalAmount := big.NewInt(100000000000000000) prevStake := big.NewInt(2) - expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, big.NewInt(100000000000000000)) + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, big.NewInt(100000000000000000), stakingHeight) if err != nil { t.Fatal(err) } @@ 
-204,6 +208,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.DepositStake(ctx, stakedAmount) @@ -243,6 +248,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.DepositStake(ctx, big.NewInt(0)) @@ -275,6 +281,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.DepositStake(ctx, big.NewInt(100000000000000000)) @@ -307,6 +314,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.DepositStake(ctx, big.NewInt(100000000000000000)) @@ -345,6 +353,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.DepositStake(ctx, stakedAmount) @@ -394,6 +403,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.DepositStake(ctx, stakedAmount) @@ -456,6 +466,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.DepositStake(ctx, stakedAmount) @@ -516,6 +527,7 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.DepositStake(ctx, stakedAmount) @@ -542,9 +554,269 @@ func TestDepositStake(t *testing.T) { ), nonce, false, + stakingHeight, + ) + + _, err := contract.DepositStake(ctx, stakedAmount) + if err == nil { + t.Fatalf("expected error") + } + }) +} + +func TestChangeHeight(t *testing.T) { + t.Parallel() + + ctx := context.Background() + owner := common.HexToAddress("abcd") + stakingContractAddress := common.HexToAddress("ffff") + bzzTokenAddress := common.HexToAddress("eeee") + nonce := common.BytesToHash(make([]byte, 32)) + txHashDeposited := common.HexToHash("c3a7") + stakedAmount := big.NewInt(0) + txHashApprove := common.HexToHash("abb0") + + t.Run("ok", func(t *testing.T) { + t.Parallel() + + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, stakingHeight) + if err != nil { + t.Fatal(err) + } + 
+ contract := staking.New( + owner, + stakingContractAddress, + stakingContractABI, + bzzTokenAddress, + transactionMock.New( + transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + if *request.To == stakingContractAddress { + if !bytes.Equal(expectedCallData[:80], request.Data[:80]) { + return common.Hash{}, fmt.Errorf("got wrong call data. wanted %x, got %x", expectedCallData, request.Data) + } + return txHashDeposited, nil + } + return common.Hash{}, errors.New("sent to wrong contract") + }), + transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + if txHash == txHashDeposited { + return &types.Receipt{ + Status: 1, + }, nil + } + if txHash == txHashApprove { + return &types.Receipt{ + Status: 1, + }, nil + } + return nil, errors.New("unknown tx hash") + }), + transactionMock.WithCallFunc(func(ctx context.Context, request *transaction.TxRequest) (result []byte, err error) { + if *request.To == stakingContractAddress { + ret := make([]byte, 32) + ret[1] = stakingHeight + return ret, nil + } + return nil, errors.New("unexpected call") + }), + ), + nonce, + false, + stakingHeight, + ) + + _, updated, err := contract.UpdateHeight(ctx) + if err != nil { + t.Fatal(err) + } + if updated { + t.Fatal("expected height not to change") + } + }) + + t.Run("ok - height increased", func(t *testing.T) { + t.Parallel() + + var ( + oldHeight uint8 = 0 + newHeight uint8 = 1 + ) + + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, newHeight) + if err != nil { + t.Fatal(err) + } + + contract := staking.New( + owner, + stakingContractAddress, + stakingContractABI, + bzzTokenAddress, + transactionMock.New( + transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + if *request.To == stakingContractAddress { + if 
!bytes.Equal(expectedCallData[:80], request.Data[:80]) { + return common.Hash{}, fmt.Errorf("got wrong call data. wanted %x, got %x", expectedCallData, request.Data) + } + return txHashDeposited, nil + } + return common.Hash{}, errors.New("sent to wrong contract") + }), + transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + if txHash == txHashDeposited { + return &types.Receipt{ + Status: 1, + }, nil + } + if txHash == txHashApprove { + return &types.Receipt{ + Status: 1, + }, nil + } + return nil, errors.New("unknown tx hash") + }), + transactionMock.WithCallFunc(func(ctx context.Context, request *transaction.TxRequest) (result []byte, err error) { + if *request.To == stakingContractAddress { + ret := make([]byte, 32) + ret[31] = oldHeight + return ret, nil + } + return nil, errors.New("unexpected call") + }), + ), + nonce, + false, + newHeight, + ) + + _, updated, err := contract.UpdateHeight(ctx) + if err != nil { + t.Fatal(err) + } + if !updated { + t.Fatal("expected height to change") + } + }) + + t.Run("ok - height decreased", func(t *testing.T) { + t.Parallel() + + var ( + oldHeight uint8 = 1 + newHeight uint8 = 0 + ) + + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, newHeight) + if err != nil { + t.Fatal(err) + } + + contract := staking.New( + owner, + stakingContractAddress, + stakingContractABI, + bzzTokenAddress, + transactionMock.New( + transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + if *request.To == stakingContractAddress { + if !bytes.Equal(expectedCallData[:80], request.Data[:80]) { + return common.Hash{}, fmt.Errorf("got wrong call data. 
wanted %x, got %x", expectedCallData, request.Data) + } + return txHashDeposited, nil + } + return common.Hash{}, errors.New("sent to wrong contract") + }), + transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + if txHash == txHashDeposited { + return &types.Receipt{ + Status: 1, + }, nil + } + if txHash == txHashApprove { + return &types.Receipt{ + Status: 1, + }, nil + } + return nil, errors.New("unknown tx hash") + }), + transactionMock.WithCallFunc(func(ctx context.Context, request *transaction.TxRequest) (result []byte, err error) { + if *request.To == stakingContractAddress { + ret := make([]byte, 32) + ret[31] = oldHeight + return ret, nil + } + return nil, errors.New("unexpected call") + }), + ), + nonce, + false, + newHeight, + ) + + _, updated, err := contract.UpdateHeight(ctx) + if err != nil { + t.Fatal(err) + } + if !updated { + t.Fatal("expected height to change") + } + }) + + t.Run("send tx failed", func(t *testing.T) { + t.Parallel() + + prevStake := big.NewInt(0) + + contract := staking.New( + owner, + stakingContractAddress, + stakingContractABI, + bzzTokenAddress, + transactionMock.New( + transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + return common.Hash{}, errors.New("sent to wrong contract") + }), + transactionMock.WithCallFunc(func(ctx context.Context, request *transaction.TxRequest) (result []byte, err error) { + if *request.To == stakingContractAddress { + return prevStake.FillBytes(make([]byte, 32)), nil + } + return nil, errors.New("unexpected call") + }), + ), + nonce, + false, + stakingHeight, ) _, err := contract.DepositStake(ctx, stakedAmount) + if err == nil { + t.Fatal("expected error") + } + }) + + t.Run("transaction error in call", func(t *testing.T) { + t.Parallel() + + contract := staking.New( + owner, + stakingContractAddress, + stakingContractABI, + bzzTokenAddress, + 
transactionMock.New( + transactionMock.WithCallFunc(func(ctx context.Context, request *transaction.TxRequest) (result []byte, err error) { + if *request.To == stakingContractAddress { + return nil, errors.New("some error") + } + return nil, errors.New("unexpected call") + }), + ), + nonce, + false, + stakingHeight, + ) + + _, _, err := contract.UpdateHeight(ctx) if err == nil { t.Fatalf("expected error") } @@ -566,7 +838,7 @@ func TestChangeStakeOverlay(t *testing.T) { t.Run("ok", func(t *testing.T) { t.Parallel() - expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount) + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, stakingHeight) if err != nil { t.Fatal(err) } @@ -597,6 +869,7 @@ func TestChangeStakeOverlay(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.ChangeStakeOverlay(ctx, nonce) @@ -623,6 +896,7 @@ func TestChangeStakeOverlay(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.ChangeStakeOverlay(ctx, nonce) @@ -634,7 +908,7 @@ func TestChangeStakeOverlay(t *testing.T) { t.Run("invalid call data", func(t *testing.T) { t.Parallel() - expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount) + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, stakingHeight) if err != nil { t.Fatal(err) } @@ -657,6 +931,7 @@ func TestChangeStakeOverlay(t *testing.T) { ), nonce, false, + stakingHeight, ) newNonce := make([]byte, 32) @@ -671,7 +946,7 @@ func TestChangeStakeOverlay(t *testing.T) { t.Run("transaction reverted", func(t *testing.T) { t.Parallel() - expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount) + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, stakingHeight) if err != nil { t.Fatal(err) } @@ -702,6 +977,7 @@ func TestChangeStakeOverlay(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = 
contract.ChangeStakeOverlay(ctx, nonce) @@ -713,7 +989,7 @@ func TestChangeStakeOverlay(t *testing.T) { t.Run("transaction error", func(t *testing.T) { t.Parallel() - expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount) + expectedCallData, err := stakingContractABI.Pack("manageStake", nonce, stakedAmount, stakingHeight) if err != nil { t.Fatal(err) } @@ -742,6 +1018,7 @@ func TestChangeStakeOverlay(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.ChangeStakeOverlay(ctx, nonce) @@ -788,6 +1065,7 @@ func TestGetCommittedStake(t *testing.T) { ), nonce, false, + stakingHeight, ) stakedAmount, err := contract.GetPotentialStake(ctx) @@ -825,6 +1103,7 @@ func TestGetCommittedStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.GetPotentialStake(ctx) @@ -862,6 +1141,7 @@ func TestGetCommittedStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.GetPotentialStake(ctx) @@ -885,6 +1165,7 @@ func TestGetCommittedStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.GetPotentialStake(ctx) @@ -931,6 +1212,7 @@ func TestGetWithdrawableStake(t *testing.T) { ), nonce, false, + stakingHeight, ) withdrawableStake, err := contract.GetWithdrawableStake(ctx) @@ -968,6 +1250,7 @@ func TestGetWithdrawableStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.GetPotentialStake(ctx) @@ -991,6 +1274,7 @@ func TestGetWithdrawableStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err := contract.GetPotentialStake(ctx) @@ -1057,6 +1341,7 @@ func TestWithdrawStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.WithdrawStake(ctx) @@ -1092,6 +1377,7 @@ func TestWithdrawStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.WithdrawStake(ctx) @@ -1149,6 +1435,7 @@ func TestWithdrawStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.WithdrawStake(ctx) @@ -1204,6 +1491,7 @@ func 
TestWithdrawStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.WithdrawStake(ctx) @@ -1237,6 +1525,7 @@ func TestWithdrawStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.WithdrawStake(ctx) @@ -1312,6 +1601,7 @@ func TestMigrateStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.MigrateStake(ctx) @@ -1346,6 +1636,7 @@ func TestMigrateStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.MigrateStake(ctx) @@ -1426,6 +1717,7 @@ func TestMigrateStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.MigrateStake(ctx) @@ -1490,6 +1782,7 @@ func TestMigrateStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.MigrateStake(ctx) @@ -1524,6 +1817,7 @@ func TestMigrateStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.WithdrawStake(ctx) @@ -1566,6 +1860,7 @@ func TestMigrateStake(t *testing.T) { ), nonce, false, + stakingHeight, ) _, err = contract.MigrateStake(ctx) diff --git a/pkg/storageincentives/staking/mock/contract.go b/pkg/storageincentives/staking/mock/contract.go index 93fab73f937..131de14dba6 100644 --- a/pkg/storageincentives/staking/mock/contract.go +++ b/pkg/storageincentives/staking/mock/contract.go @@ -28,6 +28,10 @@ func (s *stakingContractMock) ChangeStakeOverlay(_ context.Context, h common.Has return h, nil } +func (s *stakingContractMock) UpdateHeight(_ context.Context) (common.Hash, bool, error) { + return common.Hash{}, false, nil +} + func (s *stakingContractMock) GetPotentialStake(ctx context.Context) (*big.Int, error) { return s.getStake(ctx) } diff --git a/pkg/storer/cachestore.go b/pkg/storer/cachestore.go index 5051e975da8..b4a759e57ac 100644 --- a/pkg/storer/cachestore.go +++ b/pkg/storer/cachestore.go @@ -40,8 +40,8 @@ func (db *DB) cacheWorker(ctx context.Context) { } evict := size - capc - if evict < db.opts.cacheMinEvictCount { // evict at least a min count - evict = 
db.opts.cacheMinEvictCount + if evict < db.reserveOptions.cacheMinEvictCount { // evict at least a min count + evict = db.reserveOptions.cacheMinEvictCount } dur := captureDuration(time.Now()) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index aca61a48a53..45af6919a4c 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -503,7 +503,6 @@ func (r *Reserve) IterateChunksItems(startBin uint8, cb func(*ChunkBinItem) (boo PrefixAtStart: true, }, func(res storage.Result) (bool, error) { item := res.Entry.(*ChunkBinItem) - stop, err := cb(item) if stop || err != nil { return true, err @@ -519,9 +518,18 @@ func (r *Reserve) Reset(ctx context.Context) error { size := r.Size() - bRitems := make([]*BatchRadiusItem, 0, size) + // step 1: delete epoch timestamp + err := r.st.Run(ctx, func(s transaction.Store) error { return s.IndexStore().Delete(&EpochItem{}) }) + if err != nil { + return err + } - err := r.st.IndexStore().Iterate(storage.Query{ + var eg errgroup.Group + eg.SetLimit(runtime.NumCPU()) + + // step 2: delete batchRadiusItem, chunkBinItem, and the chunk data + bRitems := make([]*BatchRadiusItem, 0, size) + err = r.st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &BatchRadiusItem{} }, }, func(res storage.Result) (bool, error) { bRitems = append(bRitems, res.Entry.(*BatchRadiusItem)) @@ -530,10 +538,6 @@ func (r *Reserve) Reset(ctx context.Context) error { if err != nil { return err } - - var eg errgroup.Group - eg.SetLimit(runtime.NumCPU()) - for _, item := range bRitems { item := item eg.Go(func() error { @@ -553,6 +557,7 @@ func (r *Reserve) Reset(ctx context.Context) error { } bRitems = nil + // step 3: delete stampindex and chunkstamp sitems := make([]*stampindex.Item, 0, size) err = r.st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &stampindex.Item{} }, @@ -581,6 +586,20 @@ func (r *Reserve) Reset(ctx 
context.Context) error { } sitems = nil + // step 4: delete binItems + err = r.st.Run(context.Background(), func(s transaction.Store) error { + for i := uint8(0); i < swarm.MaxBins; i++ { + err := s.IndexStore().Delete(&BinItem{Bin: i}) + if err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + r.size.Store(0) return nil diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index d06bb86ca46..7769af49389 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -836,6 +836,14 @@ func TestReset(t *testing.T) { } assert.Equal(t, c, total) + checkStore(t, ts.IndexStore(), &reserve.EpochItem{}, false) + + ids, _, err := r.LastBinIDs() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ids[0], uint64(chunksPerBin)) + err = r.Reset(context.Background()) if err != nil { t.Fatal(err) @@ -863,6 +871,13 @@ func TestReset(t *testing.T) { } assert.Equal(t, c, 0) + checkStore(t, ts.IndexStore(), &reserve.EpochItem{}, true) + + _, _, err = r.LastBinIDs() + if !errors.Is(err, storage.ErrNotFound) { + t.Fatalf("wanted %v, got %v", storage.ErrNotFound, err) + } + for _, c := range chs { h, err := c.Stamp().Hash() if err != nil { diff --git a/pkg/storer/internal/upload/uploadstore.go b/pkg/storer/internal/upload/uploadstore.go index 5876ad4b3a8..65c25014afc 100644 --- a/pkg/storer/internal/upload/uploadstore.go +++ b/pkg/storer/internal/upload/uploadstore.go @@ -572,7 +572,7 @@ func Report(ctx context.Context, st transaction.Store, chunk swarm.Chunk, state return nil } - return fmt.Errorf("failed to read uploadItem %s: %w", ui, err) + return fmt.Errorf("failed to read uploadItem %x: %w", ui.BatchID, err) } ti := &TagItem{TagID: ui.TagID} diff --git a/pkg/storer/migration/all_steps.go b/pkg/storer/migration/all_steps.go index 8a509c194e1..674e08c1143 100644 --- a/pkg/storer/migration/all_steps.go +++ b/pkg/storer/migration/all_steps.go @@ -22,15 
+22,15 @@ func AfterInitSteps( 1: step_01, 2: step_02(st), 3: ReserveRepairer(st, storage.ChunkType, logger), - 4: step_04(sharkyPath, sharkyNoOfShards, st), - 5: step_05(st), - 6: step_06(st), + 4: step_04(sharkyPath, sharkyNoOfShards, st, logger), + 5: step_05(st, logger), + 6: step_06(st, logger), } } // BeforeInitSteps lists all migration steps for localstore IndexStore before the localstore is initiated. -func BeforeInitSteps(st storage.BatchStore) migration.Steps { +func BeforeInitSteps(st storage.BatchStore, logger log.Logger) migration.Steps { return map[uint64]migration.StepFn{ - 1: RefCountSizeInc(st), + 1: RefCountSizeInc(st, logger), } } diff --git a/pkg/storer/migration/all_steps_test.go b/pkg/storer/migration/all_steps_test.go index cbe03741674..265197e8fce 100644 --- a/pkg/storer/migration/all_steps_test.go +++ b/pkg/storer/migration/all_steps_test.go @@ -49,12 +49,12 @@ func TestPostSteps(t *testing.T) { st := inmemstore.New() - assert.NotEmpty(t, localmigration.BeforeInitSteps(st)) + assert.NotEmpty(t, localmigration.BeforeInitSteps(st, log.Noop)) t.Run("version numbers", func(t *testing.T) { t.Parallel() - err := migration.ValidateVersions(localmigration.BeforeInitSteps(st)) + err := migration.ValidateVersions(localmigration.BeforeInitSteps(st, log.Noop)) assert.NoError(t, err) }) @@ -63,7 +63,7 @@ func TestPostSteps(t *testing.T) { store := inmemstore.New() - err := migration.Migrate(store, "migration", localmigration.BeforeInitSteps(store)) + err := migration.Migrate(store, "migration", localmigration.BeforeInitSteps(store, log.Noop)) assert.NoError(t, err) }) } diff --git a/pkg/storer/migration/refCntSize.go b/pkg/storer/migration/refCntSize.go index bb592c2e2b0..64f6f58b315 100644 --- a/pkg/storer/migration/refCntSize.go +++ b/pkg/storer/migration/refCntSize.go @@ -8,7 +8,6 @@ import ( "context" "encoding/binary" "errors" - "os" "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/sharky" @@ -101,9 +100,10 @@ func (r 
OldRetrievalIndexItem) String() string { return storageutil.JoinFields(r.Namespace(), r.ID()) } -func RefCountSizeInc(s storage.BatchStore) func() error { +func RefCountSizeInc(s storage.BatchStore, logger log.Logger) func() error { return func() error { - logger := log.NewLogger("migration-RefCountSizeInc", log.WithSink(os.Stdout)) + + logger := logger.WithName("migration-RefCountSizeInc").Register() logger.Info("starting migration of replacing chunkstore items to increase refCnt capacity") diff --git a/pkg/storer/migration/refCntSize_test.go b/pkg/storer/migration/refCntSize_test.go index 6a2fee2ae4c..bfbc8c687e7 100644 --- a/pkg/storer/migration/refCntSize_test.go +++ b/pkg/storer/migration/refCntSize_test.go @@ -8,6 +8,7 @@ import ( "math/rand" "testing" + "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/sharky" "github.com/ethersphere/bee/v2/pkg/storage/inmemstore" "github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstore" @@ -36,7 +37,7 @@ func Test_RefCntSize(t *testing.T) { assert.NoError(t, err) } - assert.NoError(t, stepFn(store)()) + assert.NoError(t, stepFn(store, log.Noop)()) // check if all entries are migrated. 
for _, entry := range oldItems { diff --git a/pkg/storer/migration/step_04.go b/pkg/storer/migration/step_04.go index 2495f022eb4..481b6744d83 100644 --- a/pkg/storer/migration/step_04.go +++ b/pkg/storer/migration/step_04.go @@ -6,7 +6,6 @@ package migration import ( "context" - "os" "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/sharky" @@ -21,13 +20,14 @@ func step_04( sharkyBasePath string, sharkyNoOfShards int, st transaction.Storage, + logger log.Logger, ) func() error { return func() error { // for in-mem store, skip this step if sharkyBasePath == "" { return nil } - logger := log.NewLogger("migration-step-04", log.WithSink(os.Stdout)) + logger := logger.WithName("migration-step-04").Register() logger.Info("starting sharky recovery") sharkyRecover, err := sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize) diff --git a/pkg/storer/migration/step_04_test.go b/pkg/storer/migration/step_04_test.go index e492b6a416a..758ddc7987f 100644 --- a/pkg/storer/migration/step_04_test.go +++ b/pkg/storer/migration/step_04_test.go @@ -11,6 +11,7 @@ import ( "path/filepath" "testing" + "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/sharky" "github.com/ethersphere/bee/v2/pkg/storage/inmemstore" chunktest "github.com/ethersphere/bee/v2/pkg/storage/testing" @@ -38,7 +39,7 @@ func Test_Step_04(t *testing.T) { store := inmemstore.New() storage := transaction.NewStorage(sharkyStore, store) - stepFn := localmigration.Step_04(sharkyDir, 1, storage) + stepFn := localmigration.Step_04(sharkyDir, 1, storage, log.Noop) chunks := chunktest.GenerateTestRandomChunks(10) diff --git a/pkg/storer/migration/step_05.go b/pkg/storer/migration/step_05.go index 25857277438..94b23d9ef67 100644 --- a/pkg/storer/migration/step_05.go +++ b/pkg/storer/migration/step_05.go @@ -7,7 +7,6 @@ package migration import ( "context" "fmt" - "os" "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/storage" @@ 
-16,9 +15,11 @@ import ( ) // step_05 is a migration step that removes all upload items from the store. -func step_05(st transaction.Storage) func() error { +func step_05(st transaction.Storage, logger log.Logger) func() error { return func() error { - logger := log.NewLogger("migration-step-05", log.WithSink(os.Stdout)) + + logger := logger.WithName("migration-step-05").Register() + logger.Info("start removing upload items") itemC := make(chan storage.Item) diff --git a/pkg/storer/migration/step_05_test.go b/pkg/storer/migration/step_05_test.go index 640bb2da5b9..aeacd310f3e 100644 --- a/pkg/storer/migration/step_05_test.go +++ b/pkg/storer/migration/step_05_test.go @@ -8,6 +8,7 @@ import ( "context" "testing" + "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/sharky" "github.com/ethersphere/bee/v2/pkg/storage" "github.com/ethersphere/bee/v2/pkg/storage/leveldbstore" @@ -98,7 +99,7 @@ func Test_Step_05(t *testing.T) { wantCount(t, store.IndexStore(), 10) - err = localmigration.Step_05(store)() + err = localmigration.Step_05(store, log.Noop)() if err != nil { t.Fatalf("step 05: %v", err) } diff --git a/pkg/storer/migration/step_06.go b/pkg/storer/migration/step_06.go index f5a48599578..4160e198b4c 100644 --- a/pkg/storer/migration/step_06.go +++ b/pkg/storer/migration/step_06.go @@ -9,7 +9,6 @@ import ( "context" "errors" "fmt" - "os" "runtime" "sync/atomic" "time" @@ -24,9 +23,10 @@ import ( ) // step_06 is a migration step that adds a stampHash to all BatchRadiusItems, ChunkBinItems and StampIndexItems. 
-func step_06(st transaction.Storage) func() error { +func step_06(st transaction.Storage, logger log.Logger) func() error { return func() error { - logger := log.NewLogger("migration-step-06", log.WithSink(os.Stdout)) + logger := logger.WithName("migration-step-06").Register() + logger.Info("start adding stampHash to BatchRadiusItems, ChunkBinItems and StampIndexItems") seenCount, doneCount, err := addStampHash(logger, st) @@ -51,7 +51,7 @@ func addStampHash(logger log.Logger, st transaction.Storage) (int64, int64, erro } if preBatchRadiusCnt != preChunkBinCnt { - return 0, 0, fmt.Errorf("pre-migration check: index counts do not match, %d vs %d. It's recommended that the repair-reserve cmd is run first", preBatchRadiusCnt, preChunkBinCnt) + return 0, 0, fmt.Errorf("pre-migration check: index counts do not match, %d vs %d", preBatchRadiusCnt, preChunkBinCnt) } // Delete epoch timestamp diff --git a/pkg/storer/migration/step_06_test.go b/pkg/storer/migration/step_06_test.go index d6e44d7e872..b5a5d3ecc7a 100644 --- a/pkg/storer/migration/step_06_test.go +++ b/pkg/storer/migration/step_06_test.go @@ -8,6 +8,7 @@ import ( "context" "testing" + "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/sharky" "github.com/ethersphere/bee/v2/pkg/storage" "github.com/ethersphere/bee/v2/pkg/storage/leveldbstore" @@ -98,7 +99,7 @@ func Test_Step_06(t *testing.T) { } require.NoError(t, err) - err = localmigration.Step_06(store)() + err = localmigration.Step_06(store, log.Noop)() require.NoError(t, err) has, err := store.IndexStore().Has(&reserve.EpochItem{}) diff --git a/pkg/storer/mock/mockreserve.go b/pkg/storer/mock/mockreserve.go index fbd27330f9f..897403fe4ce 100644 --- a/pkg/storer/mock/mockreserve.go +++ b/pkg/storer/mock/mockreserve.go @@ -78,6 +78,12 @@ func WithReserveSize(s int) Option { }) } +func WithCapacityDoubling(s int) Option { + return optionFunc(func(p *ReserveStore) { + p.capacityDoubling = s + }) +} + func WithPutHook(f 
func(swarm.Chunk) error) Option { return optionFunc(func(p *ReserveStore) { p.putHook = f @@ -106,8 +112,9 @@ type ReserveStore struct { cursorsErr error epoch uint64 - radius uint8 - reservesize int + radius uint8 + reservesize int + capacityDoubling int subResponses []chunksResponse putHook func(swarm.Chunk) error diff --git a/pkg/storer/mock/mockstorer.go b/pkg/storer/mock/mockstorer.go index a955e168d17..69e8630d846 100644 --- a/pkg/storer/mock/mockstorer.go +++ b/pkg/storer/mock/mockstorer.go @@ -225,3 +225,7 @@ func (m *mockStorer) IsWithinStorageRadius(_ swarm.Address) bool { return true } func (m *mockStorer) DebugInfo(_ context.Context) (storer.Info, error) { return m.debugInfo, nil } + +func (m *mockStorer) NeighborhoodsStat(ctx context.Context) ([]*storer.NeighborhoodStat, error) { + return nil, nil +} diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go index fe389048344..7f765ef5c08 100644 --- a/pkg/storer/reserve.go +++ b/pkg/storer/reserve.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "math" + "math/bits" "slices" "sync" "sync/atomic" @@ -55,7 +56,7 @@ func (db *DB) startReserveWorkers( go db.reserveWorker(ctx) select { - case <-time.After(db.opts.reserveWarmupDuration): + case <-time.After(db.reserveOptions.warmupDuration): case <-db.quit: return } @@ -84,7 +85,6 @@ func (db *DB) countWithinRadius(ctx context.Context) (int, error) { radius := db.StorageRadius() evictBatches := make(map[string]bool) - err := db.reserve.IterateChunksItems(0, func(ci *reserve.ChunkBinItem) (bool, error) { if ci.Bin >= radius { count++ @@ -121,7 +121,7 @@ func (db *DB) reserveWorker(ctx context.Context) { overCapTrigger, overCapUnsub := db.events.Subscribe(reserveOverCapacity) defer overCapUnsub() - thresholdTicker := time.NewTicker(db.opts.reserveWakeupDuration) + thresholdTicker := time.NewTicker(db.reserveOptions.wakeupDuration) defer thresholdTicker.Stop() _, _ = db.countWithinRadius(ctx) @@ -159,7 +159,7 @@ func (db *DB) reserveWorker(ctx context.Context) { 
continue } - if count < threshold(db.reserve.Capacity()) && db.syncer.SyncRate() == 0 && radius > db.opts.minimumRadius { + if count < threshold(db.reserve.Capacity()) && db.syncer.SyncRate() == 0 && radius > db.reserveOptions.minimumRadius { radius-- if err := db.reserve.SetRadius(radius); err != nil { db.logger.Error(err, "reserve set radius") @@ -362,8 +362,8 @@ func (db *DB) unreserve(ctx context.Context) (err error) { } evict := target - totalEvicted - if evict < int(db.opts.reserveMinEvictCount) { // evict at least a min count - evict = int(db.opts.reserveMinEvictCount) + if evict < int(db.reserveOptions.minEvictCount) { // evict at least a min count + evict = int(db.reserveOptions.minEvictCount) } binEvicted, err := db.evictBatch(ctx, b, evict, radius) @@ -493,6 +493,96 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan }, errC } +type NeighborhoodStat struct { + Neighborhood swarm.Neighborhood + ReserveSizeWithinRadius int +} + +func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error) { + + radius := db.StorageRadius() + + networkRadius := radius + uint8(db.reserveOptions.capacityDoubling) + + prefixes := neighborhoodPrefixes(db.baseAddr, int(radius), db.reserveOptions.capacityDoubling) + neighs := make([]*NeighborhoodStat, len(prefixes)) + for i, n := range prefixes { + neighs[i] = &NeighborhoodStat{swarm.NewNeighborhood(n, networkRadius), 0} + } + + err := db.reserve.IterateChunksItems(0, func(ch *reserve.ChunkBinItem) (bool, error) { + for _, n := range neighs { + if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= networkRadius { + n.ReserveSizeWithinRadius++ + break + } + } + return false, nil + }) + if err != nil { + return nil, err + } + + return neighs, err +} + +func neighborhoodPrefixes(base swarm.Address, radius int, suffixLength int) []swarm.Address { + bitCombinationsCount := int(math.Pow(2, float64(suffixLength))) + bitSuffixes := make([]uint8, bitCombinationsCount) + + 
for i := 0; i < bitCombinationsCount; i++ { + bitSuffixes[i] = uint8(i) + } + + binPrefixes := make([]swarm.Address, bitCombinationsCount) + + // copy base address + for i := range binPrefixes { + binPrefixes[i] = base.Clone() + } + + for j := range binPrefixes { + pseudoAddrBytes := binPrefixes[j].Bytes() + + // set pseudo suffix + bitSuffixPos := suffixLength - 1 + for l := radius + 0; l < radius+suffixLength+1; l++ { + index, pos := l/8, l%8 + + if hasBit(bitSuffixes[j], uint8(bitSuffixPos)) { + pseudoAddrBytes[index] = bits.Reverse8(setBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos))) + } else { + pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos))) + } + + bitSuffixPos-- + } + + // clear rest of the bits + for l := radius + suffixLength + 1; l < len(pseudoAddrBytes)*8; l++ { + index, pos := l/8, l%8 + pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos))) + } + } + + return binPrefixes +} + +// Clears the bit at pos in n. +func clearBit(n, pos uint8) uint8 { + mask := ^(uint8(1) << pos) + return n & mask +} + +// Sets the bit at pos in the integer n. +func setBit(n, pos uint8) uint8 { + return n | 1< 0 +} + // expiredBatchItem is a storage.Item implementation for expired batches. 
type expiredBatchItem struct { BatchID []byte diff --git a/pkg/storer/reserve_test.go b/pkg/storer/reserve_test.go index ce5d253be96..564fec4ca6c 100644 --- a/pkg/storer/reserve_test.go +++ b/pkg/storer/reserve_test.go @@ -666,6 +666,106 @@ func TestSubscribeBinTrigger(t *testing.T) { }) } +func TestNeighborhoodStats(t *testing.T) { + t.Parallel() + + const ( + chunkCountPerPO = 16 + maxPO = 5 + networkRadius uint8 = 4 + doublingFactor uint8 = 2 + localRadius uint8 = networkRadius - doublingFactor + ) + + mustParse := func(s string) swarm.Address { + addr, err := swarm.ParseBitStrAddress(s) + if err != nil { + t.Fatal(err) + } + return addr + } + + var ( + baseAddr = mustParse("10000") + sister1 = mustParse("10010") + sister2 = mustParse("10100") + sister3 = mustParse("10110") + ) + + putChunks := func(addr swarm.Address, startingRadius int, st *storer.DB) { + putter := st.ReservePutter() + for i := 0; i < chunkCountPerPO; i++ { + ch := chunk.GenerateValidRandomChunkAt(addr, startingRadius) + err := putter.Put(context.Background(), ch) + if err != nil { + t.Fatal(err) + } + } + } + + testF := func(t *testing.T, st *storer.DB) { + t.Helper() + + putChunks(baseAddr, int(networkRadius), st) + putChunks(sister1, int(networkRadius), st) + putChunks(sister2, int(networkRadius), st) + putChunks(sister3, int(networkRadius), st) + + neighs, err := st.NeighborhoodsStat(context.Background()) + if err != nil { + t.Fatal(err) + } + + if len(neighs) != (1 << doublingFactor) { + t.Fatalf("number of neighborhoods does not matche. wanted %d, got %d", 1< Get the chunk data and calculate transformed hash -> Assemble the sample +// If the node has doubled their capacity by some factor, sampling process need to only pertain to the +// chunks of the selected neighborhood as determined by the anchor and the "committed depth" and NOT the whole reseve. +// The committed depth is the sum of the radius and the doubling factor. 
+// For example, the committed depth is 11, but the local node has a doubling factor of 3, so the +// local radius will eventually drop to 8. The sampling must only consider chunks with proximity 11 to the anchor. func (db *DB) ReserveSample( ctx context.Context, anchor []byte, - storageRadius uint8, + commitedDepth uint8, consensusTime uint64, minBatchBalance *big.Int, ) (Sample, error) { @@ -149,9 +154,12 @@ func (db *DB) ReserveSample( addStats(stats) }() - err := db.reserve.IterateChunksItems(storageRadius, func(chi *reserve.ChunkBinItem) (bool, error) { + err := db.reserve.IterateChunksItems(db.StorageRadius(), func(ch *reserve.ChunkBinItem) (bool, error) { + if swarm.Proximity(ch.Address.Bytes(), anchor) < commitedDepth { + return false, nil + } select { - case chunkC <- chi: + case chunkC <- ch: stats.TotalIterated++ return false, nil case <-ctx.Done(): @@ -308,12 +316,12 @@ func (db *DB) ReserveSample( allStats.TotalDuration = time.Since(t) if err := g.Wait(); err != nil { - db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) + db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err) } - db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) + db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) return Sample{Stats: *allStats, Items: sampleItems}, nil } diff --git a/pkg/storer/sample_test.go b/pkg/storer/sample_test.go index 522f6807236..2f97aaab13f 100644 
--- a/pkg/storer/sample_test.go +++ b/pkg/storer/sample_test.go @@ -58,13 +58,18 @@ func TestReserveSampler(t *testing.T) { var sample1 storer.Sample + var ( + radius uint8 = 5 + anchor = swarm.RandAddressAt(t, baseAddr, int(radius)).Bytes() + ) + t.Run("reserve sample 1", func(t *testing.T) { - sample, err := st.ReserveSample(context.TODO(), []byte("anchor"), 5, timeVar, nil) + sample, err := st.ReserveSample(context.TODO(), anchor, radius, timeVar, nil) if err != nil { t.Fatal(err) } - assertValidSample(t, sample) + assertValidSample(t, sample, radius, anchor) assertSampleNoErrors(t, sample) if sample.Stats.NewIgnored != 0 { @@ -92,7 +97,7 @@ func TestReserveSampler(t *testing.T) { // Now we generate another sample with the older timestamp. This should give us // the exact same sample, ensuring that none of the later chunks were considered. t.Run("reserve sample 2", func(t *testing.T) { - sample, err := st.ReserveSample(context.TODO(), []byte("anchor"), 5, timeVar, nil) + sample, err := st.ReserveSample(context.TODO(), anchor, 5, timeVar, nil) if err != nil { t.Fatal(err) } @@ -136,14 +141,137 @@ func TestReserveSampler(t *testing.T) { }) } +func TestReserveSamplerSisterNeighborhood(t *testing.T) { + t.Parallel() + + const ( + chunkCountPerPO = 64 + maxPO = 6 + committedDepth uint8 = 5 + doubling uint8 = 2 + depthOfResponsibility uint8 = committedDepth - doubling + ) + + randChunks := func(baseAddr swarm.Address, startingRadius int, timeVar uint64) []swarm.Chunk { + var chs []swarm.Chunk + for po := startingRadius; po < maxPO; po++ { + for i := 0; i < chunkCountPerPO; i++ { + ch := chunk.GenerateValidRandomChunkAt(baseAddr, po).WithBatch(3, 2, false) + if rand.Intn(2) == 0 { // 50% chance to wrap CAC into SOC + ch = chunk.GenerateTestRandomSoChunk(t, ch) + } + + // override stamp timestamp to be before the consensus timestamp + ch = ch.WithStamp(postagetesting.MustNewStampWithTimestamp(timeVar)) + chs = append(chs, ch) + } + } + return chs + } + + testF := func(t 
*testing.T, baseAddr swarm.Address, st *storer.DB) { + t.Helper() + + count := 0 + // local neighborhood + timeVar := uint64(time.Now().UnixNano()) + chs := randChunks(baseAddr, int(committedDepth), timeVar) + putter := st.ReservePutter() + for _, ch := range chs { + err := putter.Put(context.Background(), ch) + if err != nil { + t.Fatal(err) + } + } + count += len(chs) + + sisterAnchor := swarm.RandAddressAt(t, baseAddr, int(depthOfResponsibility)) + + // chunks belonging to the sister neighborhood + chs = randChunks(sisterAnchor, int(committedDepth), timeVar) + putter = st.ReservePutter() + for _, ch := range chs { + err := putter.Put(context.Background(), ch) + if err != nil { + t.Fatal(err) + } + } + count += len(chs) + + t.Run("reserve size", reserveSizeTest(st.Reserve(), count)) + + t.Run("reserve sample", func(t *testing.T) { + sample, err := st.ReserveSample(context.TODO(), sisterAnchor.Bytes(), doubling, timeVar, nil) + if err != nil { + t.Fatal(err) + } + + assertValidSample(t, sample, doubling, baseAddr.Bytes()) + assertSampleNoErrors(t, sample) + + if sample.Stats.NewIgnored != 0 { + t.Fatalf("sample should not have ignored chunks") + } + }) + + t.Run("reserve sample 2", func(t *testing.T) { + sample, err := st.ReserveSample(context.TODO(), sisterAnchor.Bytes(), committedDepth, timeVar, nil) + if err != nil { + t.Fatal(err) + } + + assertValidSample(t, sample, depthOfResponsibility, baseAddr.Bytes()) + assertSampleNoErrors(t, sample) + + for _, s := range sample.Items { + if got := swarm.Proximity(s.ChunkAddress.Bytes(), baseAddr.Bytes()); got != depthOfResponsibility { + t.Fatalf("promixity must be exactly %d, got %d", depthOfResponsibility, got) + } + } + + if sample.Stats.NewIgnored != 0 { + t.Fatalf("sample should not have ignored chunks") + } + }) + + } + + t.Run("disk", func(t *testing.T) { + t.Parallel() + baseAddr := swarm.RandAddress(t) + opts := dbTestOps(baseAddr, 1000, nil, nil, time.Second) + opts.ValidStamp = func(ch swarm.Chunk) 
(swarm.Chunk, error) { return ch, nil } + opts.ReserveCapacityDoubling = 2 + + storer, err := diskStorer(t, opts)() + if err != nil { + t.Fatal(err) + } + testF(t, baseAddr, storer) + }) + t.Run("mem", func(t *testing.T) { + t.Parallel() + baseAddr := swarm.RandAddress(t) + opts := dbTestOps(baseAddr, 1000, nil, nil, time.Second) + opts.ValidStamp = func(ch swarm.Chunk) (swarm.Chunk, error) { return ch, nil } + opts.ReserveCapacityDoubling = 2 + + storer, err := memStorer(t, opts)() + if err != nil { + t.Fatal(err) + } + testF(t, baseAddr, storer) + }) +} + func TestRandSample(t *testing.T) { t.Parallel() sample := storer.RandSample(t, nil) - assertValidSample(t, sample) + assertValidSample(t, sample, 0, nil) } -func assertValidSample(t *testing.T, sample storer.Sample) { +func assertValidSample(t *testing.T, sample storer.Sample, minRadius uint8, anchor []byte) { t.Helper() // Assert that sample size is exactly storer.SampleSize @@ -165,6 +293,9 @@ func assertValidSample(t *testing.T, sample storer.Sample) { if item.Stamp == nil { t.Fatalf("sample item [%d]: stamp should be set", i) } + if got := swarm.Proximity(item.ChunkAddress.Bytes(), anchor); got < minRadius { + t.Fatalf("sample item [%d]: chunk should have proximity %d with the anchor, got %d", i, minRadius, got) + } } for i, item := range sample.Items { assertSampleItem(item, i) diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go index bf7e0f0dbc1..2094596976f 100644 --- a/pkg/storer/storer.go +++ b/pkg/storer/storer.go @@ -177,6 +177,10 @@ type Debugger interface { DebugInfo(context.Context) (Info, error) } +type NeighborhoodStats interface { + NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error) +} + type memFS struct { afero.Fs } @@ -239,6 +243,7 @@ const ( defaultDisableSeeksCompaction = false defaultCacheCapacity = uint64(1_000_000) defaultBgCacheWorkers = 16 + DefaultReserveCapacity = 1 << 22 // 4194304 chunks indexPath = "indexstore" sharkyPath = "sharky" @@ -278,9 +283,9 @@ func 
initDiskRepository( return nil, nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err) } - err = migration.Migrate(store, "core-migration", localmigration.BeforeInitSteps(store)) + err = migration.Migrate(store, "core-migration", localmigration.BeforeInitSteps(store, opts.Logger)) if err != nil { - return nil, nil, nil, fmt.Errorf("failed core migration: %w", err) + return nil, nil, nil, errors.Join(store.Close(), fmt.Errorf("failed core migration: %w", err)) } if opts.LdbStats.Load() != nil { @@ -378,9 +383,10 @@ type Options struct { RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer - ReserveCapacity int - ReserveWakeUpDuration time.Duration - ReserveMinEvictCount uint64 + ReserveCapacity int + ReserveWakeUpDuration time.Duration + ReserveMinEvictCount uint64 + ReserveCapacityDoubling int CacheCapacity uint64 CacheMinEvictCount uint64 @@ -396,7 +402,7 @@ func defaultOptions() *Options { LdbDisableSeeksCompaction: defaultDisableSeeksCompaction, CacheCapacity: defaultCacheCapacity, Logger: log.Noop, - ReserveCapacity: 4_194_304, // 2^22 chunks + ReserveCapacity: DefaultReserveCapacity, ReserveWakeUpDuration: time.Minute * 30, } } @@ -436,17 +442,18 @@ type DB struct { validStamp postage.ValidStampFn setSyncerOnce sync.Once syncer Syncer - opts workerOpts + reserveOptions reserveOpts pinIntegrity *PinIntegrity } -type workerOpts struct { - reserveWarmupDuration time.Duration - reserveWakeupDuration time.Duration - reserveMinEvictCount uint64 - cacheMinEvictCount uint64 - minimumRadius uint8 +type reserveOpts struct { + warmupDuration time.Duration + wakeupDuration time.Duration + minEvictCount uint64 + cacheMinEvictCount uint64 + minimumRadius uint8 + capacityDoubling int } // New returns a newly constructed DB object which implements all the above @@ -482,6 +489,12 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { } } + defer func() { + if err != nil && dbCloser != nil { + err = errors.Join(err, 
dbCloser.Close()) + } + }() + sharkyBasePath := "" if dirPath != "" { sharkyBasePath = path.Join(dirPath, sharkyPath) @@ -495,7 +508,7 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { ) }) if err != nil { - return nil, err + return nil, fmt.Errorf("failed regular migration: %w", err) } cacheObj, err := cache.New(ctx, st.IndexStore(), opts.CacheCapacity) @@ -527,12 +540,13 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { validStamp: opts.ValidStamp, events: events.NewSubscriber(), reserveBinEvents: events.NewSubscriber(), - opts: workerOpts{ - reserveWarmupDuration: opts.WarmupDuration, - reserveWakeupDuration: opts.ReserveWakeUpDuration, - reserveMinEvictCount: opts.ReserveMinEvictCount, - cacheMinEvictCount: opts.CacheMinEvictCount, - minimumRadius: uint8(opts.MinimumStorageRadius), + reserveOptions: reserveOpts{ + warmupDuration: opts.WarmupDuration, + wakeupDuration: opts.ReserveWakeUpDuration, + minEvictCount: opts.ReserveMinEvictCount, + cacheMinEvictCount: opts.CacheMinEvictCount, + minimumRadius: uint8(opts.MinimumStorageRadius), + capacityDoubling: opts.ReserveCapacityDoubling, }, directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes), pinIntegrity: pinIntegrity, diff --git a/pkg/storer/storer_test.go b/pkg/storer/storer_test.go index b85837b35ad..e5cdc655db1 100644 --- a/pkg/storer/storer_test.go +++ b/pkg/storer/storer_test.go @@ -194,7 +194,6 @@ func dbTestOps(baseAddr swarm.Address, reserveCapacity int, bs postage.Storer, r opts.ReserveCapacity = reserveCapacity opts.Batchstore = bs opts.ReserveWakeUpDuration = reserveWakeUpTime - opts.Logger = log.Noop return opts } diff --git a/pkg/swarm/swarm.go b/pkg/swarm/swarm.go index 3723a31dedd..1b2e83b2ed7 100644 --- a/pkg/swarm/swarm.go +++ b/pkg/swarm/swarm.go @@ -327,3 +327,56 @@ func bytesToAddr(b []byte) Address { copy(addr, b) return NewAddress(addr) } + +type Neighborhood struct { + b []byte + r uint8 +} + +func NewNeighborhood(a 
Address, bits uint8) Neighborhood { + return Neighborhood{b: a.b, r: bits} +} + +// String returns a bit string of the Neighborhood. +func (n Neighborhood) String() string { + return bitStr(n.b, n.r) +} + +// Equal returns true if two neighborhoods are identical. +func (n Neighborhood) Equal(b Neighborhood) bool { + return bytes.Equal(n.b, b.b) +} + +// Bytes returns bytes representation of the Neighborhood. +func (n Neighborhood) Bytes() []byte { + return n.b +} + +// Bytes returns bytes representation of the Neighborhood. +func (n Neighborhood) Clone() Neighborhood { + if n.b == nil { + return Neighborhood{} + } + return Neighborhood{b: append(make([]byte, 0, len(n.b)), n.Bytes()...), r: n.r} +} + +func bitStr(src []byte, bits uint8) string { + + ret := "" + + for _, b := range src { + for i := 7; i >= 0; i-- { + if b&(1< 0 { + ret += "1" + } else { + ret += "0" + } + bits-- + if bits == 0 { + return ret + } + } + } + + return ret +} diff --git a/pkg/swarm/swarm_test.go b/pkg/swarm/swarm_test.go index fdde62625be..99bc243da0e 100644 --- a/pkg/swarm/swarm_test.go +++ b/pkg/swarm/swarm_test.go @@ -5,6 +5,7 @@ package swarm_test import ( + "bytes" "encoding/hex" "encoding/json" "errors" @@ -191,3 +192,40 @@ func TestParseBitStr(t *testing.T) { } } } + +func TestNeighborhood(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + overlay swarm.Address + bitStr string + }{ + { + swarm.MustParseHexAddress("5c32a2fe3d217af8c943fa665ebcfbdf7ab9af0cf1b2a1c8e5fc163dad2f5c7b"), + "010111000", + }, + { + swarm.MustParseHexAddress("eac0903e59ff1c1a5f1d7d218b33f819b199aa0f68a19fd5fa02b7f84982b55d"), + "111010101", + }, + { + swarm.MustParseHexAddress("70143dd2863ae07edfe7c1bfee75daea06226f0678e1117337d274492226bfe0"), + "011100000", + }, + } { + + n := swarm.NewNeighborhood(tc.overlay, uint8(len(tc.bitStr))) + if n.Equal(swarm.NewNeighborhood(swarm.RandAddress(t), uint8(len(tc.bitStr)))) { + t.Fatal("addresses not should match") + } + if 
!n.Equal(swarm.NewNeighborhood(tc.overlay, uint8(len(tc.bitStr)))) { + t.Fatal("addresses should match") + } + if !bytes.Equal(n.Bytes(), tc.overlay.Bytes()) { + t.Fatal("bytes should match") + } + if n.String() != tc.bitStr { + t.Fatal("bit str should match") + } + } +} diff --git a/pkg/topology/kademlia/kademlia.go b/pkg/topology/kademlia/kademlia.go index 8269b0f4012..9462372093f 100644 --- a/pkg/topology/kademlia/kademlia.go +++ b/pkg/topology/kademlia/kademlia.go @@ -440,11 +440,9 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup, return case errors.Is(err, p2p.ErrPeerBlocklisted): k.logger.Debug("peer still in blocklist", "peer_address", bzzAddr) - k.logger.Warning("peer still in blocklist") return case err != nil: k.logger.Debug("peer not reachable from kademlia", "peer_address", bzzAddr, "error", err) - k.logger.Warning("peer not reachable when attempting to connect") return } @@ -457,7 +455,7 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup, k.recalcDepth() - k.logger.Info("connected to peer", "peer_address", peer.addr, "proximity_order", peer.po) + k.logger.Debug("connected to peer", "peer_address", peer.addr, "proximity_order", peer.po) k.notifyManageLoop() k.notifyPeerSig() } @@ -1218,7 +1216,7 @@ func (k *Kad) onConnected(ctx context.Context, addr swarm.Address) error { // Disconnected is called when peer disconnects. 
func (k *Kad) Disconnected(peer p2p.Peer) { - k.logger.Info("disconnected peer", "peer_address", peer.Address) + k.logger.Debug("disconnected peer", "peer_address", peer.Address) k.connectedPeers.Remove(peer.Address) diff --git a/pkg/transaction/monitor.go b/pkg/transaction/monitor.go index f4ebcd3be92..bbd4a50ec58 100644 --- a/pkg/transaction/monitor.go +++ b/pkg/transaction/monitor.go @@ -48,6 +48,7 @@ type transactionMonitor struct { } type transactionWatch struct { + start time.Time receiptC chan types.Receipt // channel to which the receipt will be written once available errC chan error // error channel (primarily for cancelled transactions) } @@ -91,6 +92,7 @@ func (tm *transactionMonitor) WatchTransaction(txHash common.Hash, nonce uint64) } tm.watchesByNonce[nonce][txHash] = append(tm.watchesByNonce[nonce][txHash], transactionWatch{ + start: time.Now(), receiptC: receiptC, errC: errC, }) @@ -169,44 +171,36 @@ func (tm *transactionMonitor) watchPending() { } } -// potentiallyConfirmedTxWatches returns all watches with nonce less than what was specified -func (tm *transactionMonitor) potentiallyConfirmedTxWatches(nonce uint64) (watches map[uint64]map[common.Hash][]transactionWatch) { +func (tm *transactionMonitor) hasWatches() bool { tm.lock.Lock() defer tm.lock.Unlock() + return len(tm.watchesByNonce) > 0 +} - potentiallyConfirmedTxWatches := make(map[uint64]map[common.Hash][]transactionWatch) - for n, watches := range tm.watchesByNonce { - if n < nonce { - potentiallyConfirmedTxWatches[n] = watches +func watchStart(watches []transactionWatch) time.Time { + if len(watches) == 0 { + return time.Time{} + } + start := watches[0].start + for _, w := range watches[1:] { + if w.start.Before(start) { + start = w.start } } - - return potentiallyConfirmedTxWatches -} - -func (tm *transactionMonitor) hasWatches() bool { - tm.lock.Lock() - defer tm.lock.Unlock() - return len(tm.watchesByNonce) > 0 + return start } // check pending checks the given block (number) for 
confirmed or cancelled transactions func (tm *transactionMonitor) checkPending(block uint64) error { - nonce, err := tm.backend.NonceAt(tm.ctx, tm.sender, new(big.Int).SetUint64(block)) - if err != nil { - return err - } - - // transactions with a nonce lower or equal to what is found on-chain are either confirmed or (at least temporarily) cancelled - potentiallyConfirmedTxWatches := tm.potentiallyConfirmedTxWatches(nonce) - confirmedNonces := make(map[uint64]*types.Receipt) var cancelledNonces []uint64 - for nonceGroup, watchMap := range potentiallyConfirmedTxWatches { - for txHash := range watchMap { + for nonceGroup, watchMap := range tm.watchesByNonce { + for txHash, watches := range watchMap { receipt, err := tm.backend.TransactionReceipt(tm.ctx, txHash) if err != nil { - if errors.Is(err, ethereum.NotFound) { + // wait for a few blocks to be mined before considering a transaction not existing + transactionWatchNotFoundTimeout := 5 * tm.pollingInterval + if errors.Is(err, ethereum.NotFound) && watchStart(watches).Before(time.Now().Add(transactionWatchNotFoundTimeout)) { // if both err and receipt are nil, there is no receipt // the reason why we consider this only potentially cancelled is to catch cases where after a reorg the original transaction wins continue @@ -220,7 +214,7 @@ func (tm *transactionMonitor) checkPending(block uint64) error { } } - for nonceGroup := range potentiallyConfirmedTxWatches { + for nonceGroup := range tm.watchesByNonce { if _, ok := confirmedNonces[nonceGroup]; ok { continue } @@ -240,7 +234,7 @@ func (tm *transactionMonitor) checkPending(block uint64) error { defer tm.lock.Unlock() for nonce, receipt := range confirmedNonces { - for txHash, watches := range potentiallyConfirmedTxWatches[nonce] { + for txHash, watches := range tm.watchesByNonce[nonce] { if receipt.TxHash == txHash { for _, watch := range watches { select { diff --git a/pkg/transaction/transaction.go b/pkg/transaction/transaction.go index 560136822ac..fc7a4904510 
100644 --- a/pkg/transaction/transaction.go +++ b/pkg/transaction/transaction.go @@ -193,11 +193,6 @@ func (t *transactionService) Send(ctx context.Context, request *TxRequest, boost return common.Hash{}, err } - err = t.putNonce(nonce + 1) - if err != nil { - return common.Hash{}, err - } - txHash = signedTx.Hash() err = t.store.Put(storedTransactionKey(txHash), StoredTransaction{ @@ -353,10 +348,6 @@ func (t *transactionService) suggestedFeeAndTip(ctx context.Context, gasPrice *b } -func (t *transactionService) nonceKey() string { - return fmt.Sprintf("%s%x", noncePrefix, t.sender) -} - func storedTransactionKey(txHash common.Hash) string { return fmt.Sprintf("%s%x", storedTransactionPrefix, txHash) } @@ -371,26 +362,28 @@ func (t *transactionService) nextNonce(ctx context.Context) (uint64, error) { return 0, err } - var nonce uint64 - err = t.store.Get(t.nonceKey(), &nonce) + pendingTxs, err := t.PendingTransactions() if err != nil { - // If no nonce was found locally used whatever we get from the backend. - if errors.Is(err, storage.ErrNotFound) { - return onchainNonce, nil - } return 0, err } - // If the nonce onchain is larger than what we have there were external - // transactions and we need to update our nonce. - if onchainNonce > nonce { - return onchainNonce, nil + pendingTxs = t.filterPendingTransactions(t.ctx, pendingTxs) + + // PendingNonceAt returns the nonce we should use, but we will + // compare this to our pending tx list, therefore the -1. 
+ var maxNonce uint64 = onchainNonce - 1 + for _, txHash := range pendingTxs { + trx, _, err := t.backend.TransactionByHash(ctx, txHash) + + if err != nil { + t.logger.Error(err, "pending transaction not found", "tx", txHash) + return 0, err + } + + maxNonce = max(maxNonce, trx.Nonce()) } - return nonce, nil -} -func (t *transactionService) putNonce(nonce uint64) error { - return t.store.Put(t.nonceKey(), nonce) + return maxNonce + 1, nil } // WaitForReceipt waits until either the transaction with the given hash has diff --git a/pkg/transaction/transaction_test.go b/pkg/transaction/transaction_test.go index 41cf94f0112..d105738afbf 100644 --- a/pkg/transaction/transaction_test.go +++ b/pkg/transaction/transaction_test.go @@ -30,10 +30,6 @@ import ( "github.com/ethersphere/bee/v2/pkg/util/testutil" ) -func nonceKey(sender common.Address) string { - return fmt.Sprintf("transaction_nonce_%x", sender) -} - func signerMockForTransaction(t *testing.T, signedTx *types.Transaction, sender common.Address, signerChainID *big.Int) crypto.Signer { t.Helper() return signermock.New( @@ -66,10 +62,6 @@ func signerMockForTransaction(t *testing.T, signedTx *types.Transaction, sender t.Fatalf("signing transaction with wrong gasprice. wanted %d, got %d", signedTx.GasPrice(), transaction.GasPrice()) } - if transaction.Nonce() != signedTx.Nonce() { - t.Fatalf("signing transaction with wrong nonce. 
wanted %d, got %d", signedTx.Nonce(), transaction.Nonce()) - } - return signedTx, nil }), signermock.WithEthereumAddressFunc(func() (common.Address, error) { @@ -112,10 +104,6 @@ func TestTransactionSend(t *testing.T) { Value: value, } store := storemock.NewStateStore() - err := store.Put(nonceKey(sender), nonce) - if err != nil { - t.Fatal(err) - } transactionService, err := transaction.NewService(logger, sender, backendmock.New( @@ -167,15 +155,6 @@ func TestTransactionSend(t *testing.T) { t.Fatal("returning wrong transaction hash") } - var storedNonce uint64 - err = store.Get(nonceKey(sender), &storedNonce) - if err != nil { - t.Fatal(err) - } - if storedNonce != nonce+1 { - t.Fatalf("nonce not stored correctly: want %d, got %d", nonce+1, storedNonce) - } - storedTransaction, err := transactionService.StoredTransaction(txHash) if err != nil { t.Fatal(err) @@ -238,10 +217,6 @@ func TestTransactionSend(t *testing.T) { MinEstimatedGasLimit: estimatedGasLimit, } store := storemock.NewStateStore() - err := store.Put(nonceKey(sender), nonce) - if err != nil { - t.Fatal(err) - } transactionService, err := transaction.NewService(logger, sender, backendmock.New( @@ -287,15 +262,6 @@ func TestTransactionSend(t *testing.T) { t.Fatal("returning wrong transaction hash") } - var storedNonce uint64 - err = store.Get(nonceKey(sender), &storedNonce) - if err != nil { - t.Fatal(err) - } - if storedNonce != nonce+1 { - t.Fatalf("nonce not stored correctly: want %d, got %d", nonce+1, storedNonce) - } - storedTransaction, err := transactionService.StoredTransaction(txHash) if err != nil { t.Fatal(err) @@ -363,10 +329,6 @@ func TestTransactionSend(t *testing.T) { Value: value, } store := storemock.NewStateStore() - err := store.Put(nonceKey(sender), nonce) - if err != nil { - t.Fatal(err) - } transactionService, err := transaction.NewService(logger, sender, backendmock.New( @@ -418,15 +380,6 @@ func TestTransactionSend(t *testing.T) { t.Fatal("returning wrong transaction hash") } - 
var storedNonce uint64 - err = store.Get(nonceKey(sender), &storedNonce) - if err != nil { - t.Fatal(err) - } - if storedNonce != nonce+1 { - t.Fatalf("nonce not stored correctly: want %d, got %d", nonce+1, storedNonce) - } - storedTransaction, err := transactionService.StoredTransaction(txHash) if err != nil { t.Fatal(err) @@ -534,15 +487,6 @@ func TestTransactionSend(t *testing.T) { if !bytes.Equal(txHash.Bytes(), signedTx.Hash().Bytes()) { t.Fatal("returning wrong transaction hash") } - - var storedNonce uint64 - err = store.Get(nonceKey(sender), &storedNonce) - if err != nil { - t.Fatal(err) - } - if storedNonce != nonce+1 { - t.Fatalf("did not store nonce correctly. wanted %d, got %d", nonce+1, storedNonce) - } }) t.Run("send_skipped_nonce", func(t *testing.T) { @@ -565,10 +509,6 @@ func TestTransactionSend(t *testing.T) { Value: value, } store := storemock.NewStateStore() - err := store.Put(nonceKey(sender), nonce) - if err != nil { - t.Fatal(err) - } transactionService, err := transaction.NewService(logger, sender, backendmock.New( @@ -614,15 +554,6 @@ func TestTransactionSend(t *testing.T) { if !bytes.Equal(txHash.Bytes(), signedTx.Hash().Bytes()) { t.Fatal("returning wrong transaction hash") } - - var storedNonce uint64 - err = store.Get(nonceKey(sender), &storedNonce) - if err != nil { - t.Fatal(err) - } - if storedNonce != nextNonce+1 { - t.Fatalf("did not store nonce correctly. wanted %d, got %d", nextNonce+1, storedNonce) - } }) }