diff --git a/.github/workflows/beekeeper.yml b/.github/workflows/beekeeper.yml
index aaa59eca449..95a09cfc1dc 100644
--- a/.github/workflows/beekeeper.yml
+++ b/.github/workflows/beekeeper.yml
@@ -11,7 +11,7 @@ env:
K3S_VERSION: "v1.22.17+k3s1"
REPLICA: 3
RUN_TYPE: "PR RUN"
- SETUP_CONTRACT_IMAGE_TAG: "1.0.4"
+ SETUP_CONTRACT_IMAGE_TAG: "2.0.7"
BEELOCAL_BRANCH: "main"
BEEKEEPER_BRANCH: "master"
BEEKEEPER_METRICS_ENABLED: false
diff --git a/Dockerfile b/Dockerfile
index 604285e2003..5aa35de1e8c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,7 +8,7 @@ COPY . ./
RUN make binary
-FROM debian:11.5-slim
+FROM debian:12.4-slim
ENV DEBIAN_FRONTEND noninteractive
diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser
index 90cebf06797..907c29ed0ab 100644
--- a/Dockerfile.goreleaser
+++ b/Dockerfile.goreleaser
@@ -1,4 +1,4 @@
-FROM debian:11.5-slim
+FROM debian:12.4-slim
ENV DEBIAN_FRONTEND noninteractive
diff --git a/Dockerfile.scratch b/Dockerfile.scratch
index 5b7489e059e..85905fd691b 100644
--- a/Dockerfile.scratch
+++ b/Dockerfile.scratch
@@ -1,4 +1,4 @@
-FROM debian:11.5-slim
+FROM debian:12.4-slim
ENV DEBIAN_FRONTEND noninteractive
diff --git a/cmd/bee/cmd/cmd.go b/cmd/bee/cmd/cmd.go
index 344cb05d894..ffe6bc06d01 100644
--- a/cmd/bee/cmd/cmd.go
+++ b/cmd/bee/cmd/cmd.go
@@ -22,70 +22,69 @@ import (
)
const (
- optionNameDataDir = "data-dir"
- optionNameCacheCapacity = "cache-capacity"
- optionNameDBOpenFilesLimit = "db-open-files-limit"
- optionNameDBBlockCacheCapacity = "db-block-cache-capacity"
- optionNameDBWriteBufferSize = "db-write-buffer-size"
- optionNameDBDisableSeeksCompaction = "db-disable-seeks-compaction"
- optionNamePassword = "password"
- optionNamePasswordFile = "password-file"
- optionNameAPIAddr = "api-addr"
- optionNameP2PAddr = "p2p-addr"
- optionNameNATAddr = "nat-addr"
- optionNameP2PWSEnable = "p2p-ws-enable"
- optionNameDebugAPIEnable = "debug-api-enable"
- optionNameDebugAPIAddr = "debug-api-addr"
- optionNameBootnodes = "bootnode"
- optionNameNetworkID = "network-id"
- optionWelcomeMessage = "welcome-message"
- optionCORSAllowedOrigins = "cors-allowed-origins"
- optionNameTracingEnabled = "tracing-enable"
- optionNameTracingEndpoint = "tracing-endpoint"
- optionNameTracingHost = "tracing-host"
- optionNameTracingPort = "tracing-port"
- optionNameTracingServiceName = "tracing-service-name"
- optionNameVerbosity = "verbosity"
- optionNamePaymentThreshold = "payment-threshold"
- optionNamePaymentTolerance = "payment-tolerance-percent"
- optionNamePaymentEarly = "payment-early-percent"
- optionNameResolverEndpoints = "resolver-options"
- optionNameBootnodeMode = "bootnode-mode"
- optionNameClefSignerEnable = "clef-signer-enable"
- optionNameClefSignerEndpoint = "clef-signer-endpoint"
- optionNameClefSignerEthereumAddress = "clef-signer-ethereum-address"
- optionNameSwapEndpoint = "swap-endpoint" // deprecated: use rpc endpoint instead
- optionNameBlockchainRpcEndpoint = "blockchain-rpc-endpoint"
- optionNameSwapFactoryAddress = "swap-factory-address"
- optionNameSwapLegacyFactoryAddresses = "swap-legacy-factory-addresses"
- optionNameSwapInitialDeposit = "swap-initial-deposit"
- optionNameSwapEnable = "swap-enable"
- optionNameChequebookEnable = "chequebook-enable"
- optionNameSwapDeploymentGasPrice = "swap-deployment-gas-price"
- optionNameFullNode = "full-node"
- optionNamePostageContractAddress = "postage-stamp-address"
- optionNamePostageContractStartBlock = "postage-stamp-start-block"
- optionNamePriceOracleAddress = "price-oracle-address"
- optionNameRedistributionAddress = "redistribution-address"
- optionNameStakingAddress = "staking-address"
- optionNameBlockTime = "block-time"
- optionWarmUpTime = "warmup-time"
- optionNameMainNet = "mainnet"
- optionNameRetrievalCaching = "cache-retrieval"
- optionNameDevReserveCapacity = "dev-reserve-capacity"
- optionNameResync = "resync"
- optionNamePProfBlock = "pprof-profile"
- optionNamePProfMutex = "pprof-mutex"
- optionNameStaticNodes = "static-nodes"
- optionNameAllowPrivateCIDRs = "allow-private-cidrs"
- optionNameSleepAfter = "sleep-after"
- optionNameRestrictedAPI = "restricted"
- optionNameTokenEncryptionKey = "token-encryption-key"
- optionNameAdminPasswordHash = "admin-password"
- optionNameUsePostageSnapshot = "use-postage-snapshot"
- optionNameStorageIncentivesEnable = "storage-incentives-enable"
- optionNameStateStoreCacheCapacity = "statestore-cache-capacity"
- optionNameTargetNeighborhood = "target-neighborhood"
+ optionNameDataDir = "data-dir"
+ optionNameCacheCapacity = "cache-capacity"
+ optionNameDBOpenFilesLimit = "db-open-files-limit"
+ optionNameDBBlockCacheCapacity = "db-block-cache-capacity"
+ optionNameDBWriteBufferSize = "db-write-buffer-size"
+ optionNameDBDisableSeeksCompaction = "db-disable-seeks-compaction"
+ optionNamePassword = "password"
+ optionNamePasswordFile = "password-file"
+ optionNameAPIAddr = "api-addr"
+ optionNameP2PAddr = "p2p-addr"
+ optionNameNATAddr = "nat-addr"
+ optionNameP2PWSEnable = "p2p-ws-enable"
+ optionNameDebugAPIEnable = "debug-api-enable"
+ optionNameDebugAPIAddr = "debug-api-addr"
+ optionNameBootnodes = "bootnode"
+ optionNameNetworkID = "network-id"
+ optionWelcomeMessage = "welcome-message"
+ optionCORSAllowedOrigins = "cors-allowed-origins"
+ optionNameTracingEnabled = "tracing-enable"
+ optionNameTracingEndpoint = "tracing-endpoint"
+ optionNameTracingHost = "tracing-host"
+ optionNameTracingPort = "tracing-port"
+ optionNameTracingServiceName = "tracing-service-name"
+ optionNameVerbosity = "verbosity"
+ optionNamePaymentThreshold = "payment-threshold"
+ optionNamePaymentTolerance = "payment-tolerance-percent"
+ optionNamePaymentEarly = "payment-early-percent"
+ optionNameResolverEndpoints = "resolver-options"
+ optionNameBootnodeMode = "bootnode-mode"
+ optionNameClefSignerEnable = "clef-signer-enable"
+ optionNameClefSignerEndpoint = "clef-signer-endpoint"
+ optionNameClefSignerEthereumAddress = "clef-signer-ethereum-address"
+ optionNameSwapEndpoint = "swap-endpoint" // deprecated: use rpc endpoint instead
+ optionNameBlockchainRpcEndpoint = "blockchain-rpc-endpoint"
+ optionNameSwapFactoryAddress = "swap-factory-address"
+ optionNameSwapInitialDeposit = "swap-initial-deposit"
+ optionNameSwapEnable = "swap-enable"
+ optionNameChequebookEnable = "chequebook-enable"
+ optionNameSwapDeploymentGasPrice = "swap-deployment-gas-price"
+ optionNameFullNode = "full-node"
+ optionNamePostageContractAddress = "postage-stamp-address"
+ optionNamePostageContractStartBlock = "postage-stamp-start-block"
+ optionNamePriceOracleAddress = "price-oracle-address"
+ optionNameRedistributionAddress = "redistribution-address"
+ optionNameStakingAddress = "staking-address"
+ optionNameBlockTime = "block-time"
+ optionWarmUpTime = "warmup-time"
+ optionNameMainNet = "mainnet"
+ optionNameRetrievalCaching = "cache-retrieval"
+ optionNameDevReserveCapacity = "dev-reserve-capacity"
+ optionNameResync = "resync"
+ optionNamePProfBlock = "pprof-profile"
+ optionNamePProfMutex = "pprof-mutex"
+ optionNameStaticNodes = "static-nodes"
+ optionNameAllowPrivateCIDRs = "allow-private-cidrs"
+ optionNameSleepAfter = "sleep-after"
+ optionNameRestrictedAPI = "restricted"
+ optionNameTokenEncryptionKey = "token-encryption-key"
+ optionNameAdminPasswordHash = "admin-password"
+ optionNameUsePostageSnapshot = "use-postage-snapshot"
+ optionNameStorageIncentivesEnable = "storage-incentives-enable"
+ optionNameStateStoreCacheCapacity = "statestore-cache-capacity"
+ optionNameTargetNeighborhood = "target-neighborhood"
)
// nolint:gochecknoinits
@@ -277,7 +276,6 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameSwapEndpoint, "", "swap blockchain endpoint") // deprecated: use rpc endpoint instead
cmd.Flags().String(optionNameBlockchainRpcEndpoint, "", "rpc blockchain endpoint")
cmd.Flags().String(optionNameSwapFactoryAddress, "", "swap factory addresses")
- cmd.Flags().StringSlice(optionNameSwapLegacyFactoryAddresses, nil, "legacy swap factory addresses")
cmd.Flags().String(optionNameSwapInitialDeposit, "0", "initial deposit if deploying a new chequebook")
cmd.Flags().Bool(optionNameSwapEnable, false, "enable swap")
cmd.Flags().Bool(optionNameChequebookEnable, true, "enable chequebook")
diff --git a/cmd/bee/cmd/db.go b/cmd/bee/cmd/db.go
index e2887090664..ec94ff7f6d8 100644
--- a/cmd/bee/cmd/db.go
+++ b/cmd/bee/cmd/db.go
@@ -26,7 +26,12 @@ import (
"github.com/spf13/cobra"
)
-const optionNameValidation = "validate"
+const (
+ optionNameValidation = "validate"
+ optionNameValidationPin = "validate-pin"
+ optionNameCollectionPin = "pin"
+ optionNameOutputLocation = "output"
+)
func (c *command) initDBCmd() {
cmd := &cobra.Command{
@@ -40,6 +45,7 @@ func (c *command) initDBCmd() {
dbInfoCmd(cmd)
dbCompactCmd(cmd)
dbValidateCmd(cmd)
+ dbValidatePinsCmd(cmd)
c.root.AddCommand(cmd)
}
@@ -166,6 +172,61 @@ func dbCompactCmd(cmd *cobra.Command) {
cmd.AddCommand(c)
}
+func dbValidatePinsCmd(cmd *cobra.Command) {
+ c := &cobra.Command{
+ Use: "validate-pin",
+ Short: "Validates pin collection chunks with sharky store.",
+ RunE: func(cmd *cobra.Command, args []string) (err error) {
+ v, err := cmd.Flags().GetString(optionNameVerbosity)
+ if err != nil {
+ return fmt.Errorf("get verbosity: %w", err)
+ }
+ v = strings.ToLower(v)
+ logger, err := newLogger(cmd, v)
+ if err != nil {
+ return fmt.Errorf("new logger: %w", err)
+ }
+
+ dataDir, err := cmd.Flags().GetString(optionNameDataDir)
+ if err != nil {
+ return fmt.Errorf("get data-dir: %w", err)
+ }
+ if dataDir == "" {
+ return errors.New("no data-dir provided")
+ }
+
+ providedPin, err := cmd.Flags().GetString(optionNameCollectionPin)
+ if err != nil {
+ return fmt.Errorf("read pin option: %w", err)
+ }
+
+ outputLoc, err := cmd.Flags().GetString(optionNameOutputLocation)
+ if err != nil {
+ return fmt.Errorf("read location option: %w", err)
+ }
+
+ localstorePath := path.Join(dataDir, "localstore")
+
+ err = storer.ValidatePinCollectionChunks(context.Background(), localstorePath, providedPin, outputLoc, &storer.Options{
+ Logger: logger,
+ RadiusSetter: noopRadiusSetter{},
+ Batchstore: new(postage.NoOpBatchStore),
+ ReserveCapacity: node.ReserveCapacity,
+ })
+ if err != nil {
+ return fmt.Errorf("localstore: %w", err)
+ }
+
+ return nil
+ },
+ }
+ c.Flags().String(optionNameDataDir, "", "data directory")
+ c.Flags().String(optionNameVerbosity, "info", "verbosity level")
+ c.Flags().String(optionNameCollectionPin, "", "only validate given pin")
+ c.Flags().String(optionNameOutputLocation, "", "location and name of the output file")
+ cmd.AddCommand(c)
+}
+
func dbValidateCmd(cmd *cobra.Command) {
c := &cobra.Command{
Use: "validate",
diff --git a/cmd/bee/cmd/deploy.go b/cmd/bee/cmd/deploy.go
index 24887e90b53..7b3e8e57734 100644
--- a/cmd/bee/cmd/deploy.go
+++ b/cmd/bee/cmd/deploy.go
@@ -71,14 +71,7 @@ func (c *command) initDeployCmd() error {
defer swapBackend.Close()
defer transactionMonitor.Close()
- chequebookFactory, err := node.InitChequebookFactory(
- logger,
- swapBackend,
- chainID,
- transactionService,
- factoryAddress,
- nil,
- )
+ chequebookFactory, err := node.InitChequebookFactory(logger, swapBackend, chainID, transactionService, factoryAddress)
if err != nil {
return err
}
diff --git a/cmd/bee/cmd/start.go b/cmd/bee/cmd/start.go
index e76719669af..ee287a2104a 100644
--- a/cmd/bee/cmd/start.go
+++ b/cmd/bee/cmd/start.go
@@ -313,7 +313,6 @@ func buildBeeNode(ctx context.Context, c *command, cmd *cobra.Command, logger lo
BootnodeMode: bootNode,
BlockchainRpcEndpoint: blockchainRpcEndpoint,
SwapFactoryAddress: c.config.GetString(optionNameSwapFactoryAddress),
- SwapLegacyFactoryAddresses: c.config.GetStringSlice(optionNameSwapLegacyFactoryAddresses),
SwapInitialDeposit: c.config.GetString(optionNameSwapInitialDeposit),
SwapEnable: c.config.GetBool(optionNameSwapEnable),
ChequebookEnable: c.config.GetBool(optionNameChequebookEnable),
diff --git a/go.mod b/go.mod
index 5f38b82e566..5d3e61c85ec 100644
--- a/go.mod
+++ b/go.mod
@@ -9,9 +9,9 @@ require (
github.com/casbin/casbin/v2 v2.35.0
github.com/coreos/go-semver v0.3.0
github.com/ethereum/go-ethereum v1.13.4
- github.com/ethersphere/go-price-oracle-abi v0.1.0
- github.com/ethersphere/go-storage-incentives-abi v0.6.0
- github.com/ethersphere/go-sw3-abi v0.4.0
+ github.com/ethersphere/go-price-oracle-abi v0.2.0
+ github.com/ethersphere/go-storage-incentives-abi v0.6.2
+ github.com/ethersphere/go-sw3-abi v0.6.5
github.com/ethersphere/langos v1.0.0
github.com/go-playground/validator/v10 v10.11.1
github.com/gogo/protobuf v1.3.2
diff --git a/go.sum b/go.sum
index 0afd81c3918..c2938b6ec32 100644
--- a/go.sum
+++ b/go.sum
@@ -238,12 +238,12 @@ github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1
github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg=
github.com/ethereum/go-ethereum v1.13.4 h1:25HJnaWVg3q1O7Z62LaaI6S9wVq8QCw3K88g8wEzrcM=
github.com/ethereum/go-ethereum v1.13.4/go.mod h1:I0U5VewuuTzvBtVzKo7b3hJzDhXOUtn9mJW7SsIPB0Q=
-github.com/ethersphere/go-price-oracle-abi v0.1.0 h1:yg/hK8nETNvk+GEBASlbakMFv/CVp7HXiycrHw1pRV8=
-github.com/ethersphere/go-price-oracle-abi v0.1.0/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk=
-github.com/ethersphere/go-storage-incentives-abi v0.6.0 h1:lfGViU/wJg/CyXlntNvTQpqQ2A4QYGLJ7jo+Pw+H+a4=
-github.com/ethersphere/go-storage-incentives-abi v0.6.0/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc=
-github.com/ethersphere/go-sw3-abi v0.4.0 h1:T3ANY+ktWrPAwe2U0tZi+DILpkHzto5ym/XwV/Bbz8g=
-github.com/ethersphere/go-sw3-abi v0.4.0/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU=
+github.com/ethersphere/go-price-oracle-abi v0.2.0 h1:wtIcYLgNZHY4BjYwJCnu93SvJdVAZVvBaKinspyyHvQ=
+github.com/ethersphere/go-price-oracle-abi v0.2.0/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk=
+github.com/ethersphere/go-storage-incentives-abi v0.6.2 h1:lcVylu+KRUEOUvytP6ofcyTwTE7UmfE2oJPC4jpVjSo=
+github.com/ethersphere/go-storage-incentives-abi v0.6.2/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc=
+github.com/ethersphere/go-sw3-abi v0.6.5 h1:M5dcIe1zQYvGpY2K07UNkNU9Obc4U+A1fz68Ho/Q+XE=
+github.com/ethersphere/go-sw3-abi v0.6.5/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU=
github.com/ethersphere/langos v1.0.0 h1:NBtNKzXTTRSue95uOlzPN4py7Aofs0xWPzyj4AI1Vcc=
github.com/ethersphere/langos v1.0.0/go.mod h1:dlcN2j4O8sQ+BlCaxeBu43bgr4RQ+inJ+pHwLeZg5Tw=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
diff --git a/packaging/bee.yaml b/packaging/bee.yaml
index 328edb9c421..560352926e1 100644
--- a/packaging/bee.yaml
+++ b/packaging/bee.yaml
@@ -60,8 +60,6 @@ password-file: /var/lib/bee/password
# blockchain-rpc-endpoint: ""
## swap factory address
# swap-factory-address: ""
-## legacy swap factory addresses
-# swap-legacy-factory-addresses: ""
## initial deposit if deploying a new chequebook (default 0)
# swap-initial-deposit: 0
## gas price in wei to use for deployment and funding (default "")
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index 02005008d41..cd95f1d131f 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -12,7 +12,7 @@ Set all configuration variables inside `.env`
If you want to run node in full mode, set `BEE_FULL_NODE=true`
Bee requires an Ethereum endpoint to function. Obtain a free Infura account and set:
-- `BEE_BLOCKCHAIN_RPC_ENDPOINT=wss://goerli.infura.io/ws/v3/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`
+- `BEE_BLOCKCHAIN_RPC_ENDPOINT=wss://sepolia.infura.io/ws/v3/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`
Set bee password by either setting `BEE_PASSWORD` or `BEE_PASSWORD_FILE`
@@ -30,7 +30,7 @@ Start it with
docker-compose up -d
```
-From logs find URL line with `on goerli you can get both goerli eth and goerli bzz from` and prefund your node
+From logs find URL line with `on sepolia you can get both sepolia eth and sepolia bzz from` and prefund your node
```
docker-compose logs -f bee-1
```
diff --git a/packaging/homebrew-amd64/bee.yaml b/packaging/homebrew-amd64/bee.yaml
index 22ccc3af6c1..235ad42e781 100644
--- a/packaging/homebrew-amd64/bee.yaml
+++ b/packaging/homebrew-amd64/bee.yaml
@@ -60,8 +60,6 @@ password-file: /usr/local/var/lib/swarm-bee/password
# blockchain-rpc-endpoint: ""
## swap factory address
# swap-factory-address: ""
-## legacy swap factory addresses
-# swap-legacy-factory-addresses: ""
## initial deposit if deploying a new chequebook (default 0)
# swap-initial-deposit: 0
## gas price in wei to use for deployment and funding (default "")
diff --git a/packaging/homebrew-arm64/bee.yaml b/packaging/homebrew-arm64/bee.yaml
index 122e1288bb6..212fadca581 100644
--- a/packaging/homebrew-arm64/bee.yaml
+++ b/packaging/homebrew-arm64/bee.yaml
@@ -60,8 +60,6 @@ password-file: /opt/homebrew/var/lib/swarm-bee/password
# blockchain-rpc-endpoint: ""
## swap factory address
# swap-factory-address: ""
-## legacy swap factory addresses
-# swap-legacy-factory-addresses: ""
## initial deposit if deploying a new chequebook (default 0)
# swap-initial-deposit: 0
## gas price in wei to use for deployment and funding (default "")
diff --git a/packaging/scoop/bee.yaml b/packaging/scoop/bee.yaml
index 00387390905..18c01229b2c 100644
--- a/packaging/scoop/bee.yaml
+++ b/packaging/scoop/bee.yaml
@@ -50,8 +50,6 @@ password-file: ./password
# blockchain-rpc-endpoint: ""
## swap factory address
# swap-factory-address: ""
-## legacy swap factory addresses
-# swap-legacy-factory-addresses: ""
## initial deposit if deploying a new chequebook (default 0)
# swap-initial-deposit: 0
## gas price in wei to use for deployment and funding (default "")
diff --git a/pkg/api/api.go b/pkg/api/api.go
index 470174c68d4..64c5712edf8 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -783,7 +783,7 @@ func (p *putterSessionWrapper) Done(ref swarm.Address) error {
}
func (p *putterSessionWrapper) Cleanup() error {
- return errors.Join(p.PutterSession.Cleanup(), p.save())
+ return p.PutterSession.Cleanup()
}
func (s *Service) getStamper(batchID []byte) (postage.Stamper, func() error, error) {
diff --git a/pkg/api/bytes.go b/pkg/api/bytes.go
index 894da9a4b06..4cb90c1b763 100644
--- a/pkg/api/bytes.go
+++ b/pkg/api/bytes.go
@@ -19,6 +19,9 @@ import (
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/gorilla/mux"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/opentracing/opentracing-go/log"
+ olog "github.com/opentracing/opentracing-go/log"
)
type bytesPostResponse struct {
@@ -27,7 +30,8 @@ type bytesPostResponse struct {
// bytesUploadHandler handles upload of raw binary data of arbitrary length.
func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
- logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("post_bytes").Build())
+ span, logger, ctx := s.tracer.StartSpanFromContext(r.Context(), "post_bytes", s.logger.WithName("post_bytes").Build())
+ defer span.Finish()
headers := struct {
BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"`
@@ -59,11 +63,13 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
default:
jsonhttp.InternalServerError(w, "cannot get or create tag")
}
+ ext.LogError(span, err, log.String("action", "tag.create"))
return
}
+ span.SetTag("tagID", tag)
}
- putter, err := s.newStamperPutter(r.Context(), putterOptions{
+ putter, err := s.newStamperPutter(ctx, putterOptions{
BatchID: headers.BatchID,
TagID: tag,
Pin: headers.Pin,
@@ -84,6 +90,7 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
default:
jsonhttp.BadRequest(w, nil)
}
+ ext.LogError(span, err, log.String("action", "new.StamperPutter"))
return
}
@@ -94,7 +101,7 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
}
p := requestPipelineFn(putter, headers.Encrypt, headers.RLevel)
- address, err := p(r.Context(), r.Body)
+ address, err := p(ctx, r.Body)
if err != nil {
logger.Debug("split write all failed", "error", err)
logger.Error(nil, "split write all failed")
@@ -104,20 +111,27 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
default:
jsonhttp.InternalServerError(ow, "split write all failed")
}
+ ext.LogError(span, err, log.String("action", "split.WriteAll"))
return
}
+ span.SetTag("root_address", address)
+
err = putter.Done(address)
if err != nil {
logger.Debug("done split failed", "error", err)
logger.Error(nil, "done split failed")
jsonhttp.InternalServerError(ow, "done split failed")
+ ext.LogError(span, err, log.String("action", "putter.Done"))
return
}
if tag != 0 {
w.Header().Set(SwarmTagHeader, fmt.Sprint(tag))
}
+
+ span.LogFields(olog.Bool("success", true))
+
w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader)
jsonhttp.Created(w, bytesPostResponse{
Reference: address,
diff --git a/pkg/api/bzz.go b/pkg/api/bzz.go
index 67a80a58918..6d96b3e83bc 100644
--- a/pkg/api/bzz.go
+++ b/pkg/api/bzz.go
@@ -16,6 +16,10 @@ import (
"strings"
"time"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/pkg/feeds"
"github.com/ethersphere/bee/pkg/file/joiner"
@@ -55,7 +59,8 @@ func lookaheadBufferSize(size int64) int {
}
func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
- logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("post_bzz").Build())
+ span, logger, ctx := s.tracer.StartSpanFromContext(r.Context(), "post_bzz", s.logger.WithName("post_bzz").Build())
+ defer span.Finish()
headers := struct {
ContentType string `map:"Content-Type,mimeMediaType" validate:"required"`
@@ -89,11 +94,13 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
default:
jsonhttp.InternalServerError(w, "cannot get or create tag")
}
+ ext.LogError(span, err, olog.String("action", "tag.create"))
return
}
+ span.SetTag("tagID", tag)
}
- putter, err := s.newStamperPutter(r.Context(), putterOptions{
+ putter, err := s.newStamperPutter(ctx, putterOptions{
BatchID: headers.BatchID,
TagID: tag,
Pin: headers.Pin,
@@ -114,6 +121,7 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
default:
jsonhttp.BadRequest(w, nil)
}
+ ext.LogError(span, err, olog.String("action", "new.StamperPutter"))
return
}
@@ -124,10 +132,10 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
}
if headers.IsDir || headers.ContentType == multiPartFormData {
- s.dirUploadHandler(logger, ow, r, putter, r.Header.Get(ContentTypeHeader), headers.Encrypt, tag, headers.RLevel)
+ s.dirUploadHandler(ctx, logger, span, ow, r, putter, r.Header.Get(ContentTypeHeader), headers.Encrypt, tag, headers.RLevel)
return
}
- s.fileUploadHandler(logger, ow, r, putter, headers.Encrypt, tag, headers.RLevel)
+ s.fileUploadHandler(ctx, logger, span, ow, r, putter, headers.Encrypt, tag, headers.RLevel)
}
// fileUploadResponse is returned when an HTTP request to upload a file is successful
@@ -138,7 +146,9 @@ type bzzUploadResponse struct {
// fileUploadHandler uploads the file and its metadata supplied in the file body and
// the headers
func (s *Service) fileUploadHandler(
+ ctx context.Context,
logger log.Logger,
+ span opentracing.Span,
w http.ResponseWriter,
r *http.Request,
putter storer.PutterSession,
@@ -155,7 +165,6 @@ func (s *Service) fileUploadHandler(
}
p := requestPipelineFn(putter, encrypt, rLevel)
- ctx := r.Context()
// first store the file and get its reference
fr, err := p(ctx, r.Body)
@@ -168,6 +177,7 @@ func (s *Service) fileUploadHandler(
default:
jsonhttp.InternalServerError(w, errFileStore)
}
+ ext.LogError(span, err, olog.String("action", "file.store"))
return
}
@@ -255,11 +265,16 @@ func (s *Service) fileUploadHandler(
logger.Debug("done split failed", "error", err)
logger.Error(nil, "done split failed")
jsonhttp.InternalServerError(w, "done split failed")
+ ext.LogError(span, err, olog.String("action", "putter.Done"))
return
}
+ span.LogFields(olog.Bool("success", true))
+ span.SetTag("root_address", manifestReference)
+
if tagID != 0 {
w.Header().Set(SwarmTagHeader, fmt.Sprint(tagID))
+ span.SetTag("tagID", tagID)
}
w.Header().Set(ETagHeader, fmt.Sprintf("%q", manifestReference.String()))
w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader)
diff --git a/pkg/api/dirs.go b/pkg/api/dirs.go
index c368c8e9072..06aff9c59d6 100644
--- a/pkg/api/dirs.go
+++ b/pkg/api/dirs.go
@@ -28,13 +28,18 @@ import (
storer "github.com/ethersphere/bee/pkg/storer"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tracing"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
)
var errEmptyDir = errors.New("no files in root directory")
// dirUploadHandler uploads a directory supplied as a tar in an HTTP request
func (s *Service) dirUploadHandler(
+ ctx context.Context,
logger log.Logger,
+ span opentracing.Span,
w http.ResponseWriter,
r *http.Request,
putter storer.PutterSession,
@@ -66,7 +71,7 @@ func (s *Service) dirUploadHandler(
defer r.Body.Close()
reference, err := storeDir(
- r.Context(),
+ ctx,
encrypt,
dReader,
logger,
@@ -89,6 +94,7 @@ func (s *Service) dirUploadHandler(
default:
jsonhttp.InternalServerError(w, errDirectoryStore)
}
+ ext.LogError(span, err, olog.String("action", "dir.store"))
return
}
@@ -97,11 +103,13 @@ func (s *Service) dirUploadHandler(
logger.Debug("store dir failed", "error", err)
logger.Error(nil, "store dir failed")
jsonhttp.InternalServerError(w, errDirectoryStore)
+ ext.LogError(span, err, olog.String("action", "putter.Done"))
return
}
if tag != 0 {
w.Header().Set(SwarmTagHeader, fmt.Sprint(tag))
+ span.LogFields(olog.Bool("success", true))
}
w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader)
jsonhttp.Created(w, bzzUploadResponse{
diff --git a/pkg/api/router.go b/pkg/api/router.go
index 1ce1bb94ae0..7c854543b88 100644
--- a/pkg/api/router.go
+++ b/pkg/api/router.go
@@ -342,14 +342,12 @@ func (s *Service) mountAPI() {
if s.Restricted {
handle("/auth", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
- s.newTracingHandler("auth"),
jsonhttp.NewMaxBodyBytesHandler(512),
web.FinalHandlerFunc(s.authHandler),
),
})
handle("/refresh", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
- s.newTracingHandler("auth"),
jsonhttp.NewMaxBodyBytesHandler(512),
web.FinalHandlerFunc(s.refreshHandler),
),
diff --git a/pkg/api/stewardship.go b/pkg/api/stewardship.go
index fe1beb34e48..e617549d88f 100644
--- a/pkg/api/stewardship.go
+++ b/pkg/api/stewardship.go
@@ -70,7 +70,6 @@ func (s *Service) stewardshipPutHandler(w http.ResponseWriter, r *http.Request)
err = s.steward.Reupload(r.Context(), paths.Address, stamper)
if err != nil {
- err = errors.Join(err, save())
logger.Debug("re-upload failed", "chunk_address", paths.Address, "error", err)
logger.Error(nil, "re-upload failed")
jsonhttp.InternalServerError(w, "re-upload failed")
diff --git a/pkg/config/chain.go b/pkg/config/chain.go
index fcac751c60c..e551b3a1c47 100644
--- a/pkg/config/chain.go
+++ b/pkg/config/chain.go
@@ -27,7 +27,6 @@ type ChainConfig struct {
RedistributionAddress common.Address
SwapPriceOracleAddress common.Address
CurrentFactoryAddress common.Address
- LegacyFactoryAddresses []common.Address
// ABIs.
StakingABI string
@@ -41,16 +40,13 @@ var (
NetworkID: abi.TestnetNetworkID,
PostageStampStartBlock: abi.TestnetPostageStampBlockNumber,
NativeTokenSymbol: "ETH",
- SwarmTokenSymbol: "gBZZ",
+ SwarmTokenSymbol: "sBZZ",
StakingAddress: common.HexToAddress(abi.TestnetStakingAddress),
PostageStampAddress: common.HexToAddress(abi.TestnetPostageStampAddress),
RedistributionAddress: common.HexToAddress(abi.TestnetRedistributionAddress),
- SwapPriceOracleAddress: common.HexToAddress("0x0c9de531dcb38b758fe8a2c163444a5e54ee0db2"),
- CurrentFactoryAddress: common.HexToAddress("0x73c412512E1cA0be3b89b77aB3466dA6A1B9d273"),
- LegacyFactoryAddresses: []common.Address{
- common.HexToAddress("0xf0277caffea72734853b834afc9892461ea18474"),
- },
+ SwapPriceOracleAddress: common.HexToAddress("0xe821533d30A4250e50812Aa060EEb2E8Ef3D98f6"),
+ CurrentFactoryAddress: common.HexToAddress("0x0fF044F6bB4F684a5A149B46D7eC03ea659F98A1"),
StakingABI: abi.TestnetStakingABI,
PostageStampABI: abi.TestnetPostageStampABI,
diff --git a/pkg/crypto/crypto_test.go b/pkg/crypto/crypto_test.go
index f649694b160..4911be7032f 100644
--- a/pkg/crypto/crypto_test.go
+++ b/pkg/crypto/crypto_test.go
@@ -12,6 +12,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/pkg/crypto"
+ "github.com/ethersphere/bee/pkg/swarm"
)
func TestGenerateSecp256k1Key(t *testing.T) {
@@ -252,3 +253,60 @@ func TestNewEthereumAddress(t *testing.T) {
t.Fatalf("address mismatch %x %x", address, expectAddress)
}
}
+
+func TestNewOverlayFromEthereumAddress(t *testing.T) {
+ t.Parallel()
+
+ testCases := []struct {
+ wantAddress swarm.Address
+ overlayID uint64
+ hash []byte
+ expectedAddress string
+ }{
+ {
+ wantAddress: swarm.MustParseHexAddress("1815cac638d1525b47f848daf02b7953e4edd15c"),
+ overlayID: 1,
+ hash: common.HexToHash("0x1").Bytes(),
+ expectedAddress: "a38f7a814d4b249ae9d3821e9b898019c78ac9abe248fff171782c32a3849a17",
+ },
+ {
+ wantAddress: swarm.MustParseHexAddress("1815cac638d1525b47f848daf02b7953e4edd15c"),
+ overlayID: 1,
+ hash: common.HexToHash("0x2").Bytes(),
+ expectedAddress: "c63c10b1728dfc463c64c264f71a621fe640196979375840be42dc496b702610",
+ },
+ {
+ wantAddress: swarm.MustParseHexAddress("d26bc1715e933bd5f8fad16310042f13abc16159"),
+ overlayID: 2,
+ hash: common.HexToHash("0x1").Bytes(),
+ expectedAddress: "9f421f9149b8e31e238cfbdc6e5e833bacf1e42f77f60874d49291292858968e",
+ },
+ {
+ wantAddress: swarm.MustParseHexAddress("ac485e3c63dcf9b4cda9f007628bb0b6fed1c063"),
+ overlayID: 1,
+ hash: common.HexToHash("0x0").Bytes(),
+ expectedAddress: "fe3a6d582c577404fb19df64a44e00d3a3b71230a8464c0dd34af3f0791b45f2",
+ },
+ }
+
+ for _, tc := range testCases {
+
+ gotAddress, err := crypto.NewOverlayFromEthereumAddress(tc.wantAddress.Bytes(), tc.overlayID, tc.hash)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if l := len(gotAddress.Bytes()); l != swarm.HashSize {
+ t.Errorf("got address length %v, want %v", l, swarm.HashSize)
+ }
+
+ wantAddress, err := swarm.ParseHexAddress(tc.expectedAddress)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !wantAddress.Equal(gotAddress) {
+ t.Errorf("Expected %s, but got %s", wantAddress, gotAddress)
+ }
+ }
+}
diff --git a/pkg/node/chain.go b/pkg/node/chain.go
index bd49bc3ac02..c4cab943207 100644
--- a/pkg/node/chain.go
+++ b/pkg/node/chain.go
@@ -69,7 +69,7 @@ func InitChain(
var versionString string
err = rpcClient.CallContext(ctx, &versionString, "web3_clientVersion")
if err != nil {
- logger.Info("could not connect to backend; in a swap-enabled network a working blockchain node (for xdai network in production, goerli in testnet) is required; check your node or specify another node using --swap-endpoint.", "backend_endpoint", endpoint)
+ logger.Info("could not connect to backend; in a swap-enabled network a working blockchain node (for xdai network in production, sepolia in testnet) is required; check your node or specify another node using --swap-endpoint.", "backend_endpoint", endpoint)
return nil, common.Address{}, 0, nil, nil, fmt.Errorf("blockchain client get version: %w", err)
}
@@ -100,20 +100,11 @@ func InitChain(
// InitChequebookFactory will initialize the chequebook factory with the given
// chain backend.
-func InitChequebookFactory(
- logger log.Logger,
- backend transaction.Backend,
- chainID int64,
- transactionService transaction.Service,
- factoryAddress string,
- legacyFactoryAddresses []string,
-) (chequebook.Factory, error) {
+func InitChequebookFactory(logger log.Logger, backend transaction.Backend, chainID int64, transactionService transaction.Service, factoryAddress string) (chequebook.Factory, error) {
var currentFactory common.Address
- var legacyFactories []common.Address
-
chainCfg, found := config.GetByChainID(chainID)
- foundFactory, foundLegacyFactories := chainCfg.CurrentFactoryAddress, chainCfg.LegacyFactoryAddresses
+ foundFactory := chainCfg.CurrentFactoryAddress
if factoryAddress == "" {
if !found {
return nil, fmt.Errorf("no known factory address for this network (chain id: %d)", chainID)
@@ -127,25 +118,7 @@ func InitChequebookFactory(
logger.Info("using custom factory address", "factory_address", currentFactory)
}
- if len(legacyFactoryAddresses) == 0 {
- if found {
- legacyFactories = foundLegacyFactories
- }
- } else {
- for _, legacyAddress := range legacyFactoryAddresses {
- if !common.IsHexAddress(legacyAddress) {
- return nil, errors.New("malformed factory address")
- }
- legacyFactories = append(legacyFactories, common.HexToAddress(legacyAddress))
- }
- }
-
- return chequebook.NewFactory(
- backend,
- transactionService,
- currentFactory,
- legacyFactories,
- ), nil
+ return chequebook.NewFactory(backend, transactionService, currentFactory), nil
}
// InitChequebookService will initialize the chequebook service with the given
@@ -383,7 +356,7 @@ func (m noOpChainBackend) Metrics() []prometheus.Collector {
}
func (m noOpChainBackend) CodeAt(context.Context, common.Address, *big.Int) ([]byte, error) {
- return common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_4_0), nil
+ return common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_6_5), nil
}
func (m noOpChainBackend) CallContract(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error) {
return nil, errors.New("disabled chain backend")
diff --git a/pkg/node/node.go b/pkg/node/node.go
index 960a907a836..214cad27d4f 100644
--- a/pkg/node/node.go
+++ b/pkg/node/node.go
@@ -146,7 +146,6 @@ type Options struct {
BootnodeMode bool
BlockchainRpcEndpoint string
SwapFactoryAddress string
- SwapLegacyFactoryAddresses []string
SwapInitialDeposit string
SwapEnable bool
ChequebookEnable bool
@@ -504,22 +503,11 @@ func NewBee(
}
if o.SwapEnable {
- chequebookFactory, err = InitChequebookFactory(
- logger,
- chainBackend,
- chainID,
- transactionService,
- o.SwapFactoryAddress,
- o.SwapLegacyFactoryAddresses,
- )
+ chequebookFactory, err = InitChequebookFactory(logger, chainBackend, chainID, transactionService, o.SwapFactoryAddress)
if err != nil {
return nil, err
}
- if err = chequebookFactory.VerifyBytecode(ctx); err != nil {
- return nil, fmt.Errorf("factory fail: %w", err)
- }
-
erc20Address, err := chequebookFactory.ERC20Address(ctx)
if err != nil {
return nil, fmt.Errorf("factory fail: %w", err)
@@ -754,6 +742,7 @@ func NewBee(
RadiusSetter: kad,
WarmupDuration: o.WarmupTime,
Logger: logger,
+ Tracer: tracer,
}
if o.FullNodeMode && !o.BootnodeMode {
diff --git a/pkg/node/statestore.go b/pkg/node/statestore.go
index 5fdef7dc418..9579e1487ef 100644
--- a/pkg/node/statestore.go
+++ b/pkg/node/statestore.go
@@ -11,7 +11,6 @@ import (
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/metrics"
- "github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/statestore/storeadapter"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage/cache"
@@ -55,12 +54,7 @@ func InitStamperStore(logger log.Logger, dataDir string, stateStore storage.Stat
if err != nil {
return nil, err
}
- // TODO: remove migration after it has been a few months after the localstoreV2 release
- err = migrateStamperData(stateStore, stamperStore)
- if err != nil {
- stamperStore.Close()
- return nil, fmt.Errorf("migrating stamper data: %w", err)
- }
+
return stamperStore, nil
}
@@ -105,30 +99,3 @@ func setOverlay(s storage.StateStorer, overlay swarm.Address, nonce []byte) erro
s.Put(noncedOverlayKey, overlay),
)
}
-
-func migrateStamperData(stateStore storage.StateStorer, stamperStore storage.Store) error {
- var keys []string
- err := stateStore.Iterate("postage", func(key, value []byte) (bool, error) {
- keys = append(keys, string(key))
- st := &postage.StampIssuer{}
- if err := st.UnmarshalBinary(value); err != nil {
- return false, err
- }
- if err := stamperStore.Put(&postage.StampIssuerItem{
- Issuer: st,
- }); err != nil {
- return false, err
- }
- return false, nil
- })
- if err != nil {
- return err
- }
-
- for _, key := range keys {
- if err = stateStore.Delete(key); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/pkg/node/statestore_test.go b/pkg/node/statestore_test.go
deleted file mode 100644
index 55feb9e0e8a..00000000000
--- a/pkg/node/statestore_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package node
-
-import (
- "crypto/rand"
- "fmt"
- "math/big"
- "testing"
- "time"
-
- "github.com/ethersphere/bee/pkg/log"
- "github.com/ethersphere/bee/pkg/postage"
- "github.com/ethersphere/bee/pkg/storage"
-)
-
-func TestInitStamperStore(t *testing.T) {
- dataDir := t.TempDir()
- stateStore, _, err := InitStateStore(log.Noop, dataDir, 100_000)
- if err != nil {
- t.Fatal(err)
- }
-
- ids := make(map[string]int)
-
- // add 10 stamps to the state store
- for i := 0; i < 10; i++ {
- bID := make([]byte, 32)
- _, err = rand.Read(bID)
- if err != nil {
- t.Fatal(err)
- }
- si := postage.NewStampIssuer("", "", bID, big.NewInt(3), 11, 10, 1000, true)
- err = stateStore.Put(fmt.Sprintf("postage%s", string(si.ID())), si)
- if err != nil {
- t.Fatal(err)
- }
- ids[string(si.ID())] = 0
- }
-
- stamperStore, err := InitStamperStore(log.Noop, dataDir, stateStore)
- if err != nil {
- t.Fatal("init stamper store should migrate stamps from state store", err)
- }
-
- err = stamperStore.Iterate(
- storage.Query{
- Factory: func() storage.Item { return new(postage.StampIssuerItem) },
- }, func(result storage.Result) (bool, error) {
- issuer := result.Entry.(*postage.StampIssuerItem).Issuer
- ids[string(issuer.ID())]++
- return false, nil
- })
- if err != nil {
- t.Fatal(err)
- }
-
- var got int
- for _, v := range ids {
- if v > 0 {
- got++
- }
- }
- if got != 10 {
- t.Fatalf("want %d stamps. got %d", 10, got)
- }
-
- t.Cleanup(func() {
- err = stateStore.Close()
- if err != nil {
- t.Fatal(err)
- }
- err = stamperStore.Close()
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(1 * time.Second)
- })
-}
diff --git a/pkg/postage/batchstore/store_test.go b/pkg/postage/batchstore/store_test.go
index 2c13dd2054b..43e24014d33 100644
--- a/pkg/postage/batchstore/store_test.go
+++ b/pkg/postage/batchstore/store_test.go
@@ -222,7 +222,7 @@ func TestBatchStore_Reset(t *testing.T) {
// we expect one key in the statestore since the schema name
// will always be there.
- if c != 1 {
+ if c != 0 {
t.Fatalf("expected only one key in statestore, got %d", c)
}
}
diff --git a/pkg/postage/postagecontract/contract.go b/pkg/postage/postagecontract/contract.go
index 234cc4afdd8..035d7a18f75 100644
--- a/pkg/postage/postagecontract/contract.go
+++ b/pkg/postage/postagecontract/contract.go
@@ -24,7 +24,7 @@ import (
var (
BucketDepth = uint8(16)
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_3_1)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
ErrBatchCreate = errors.New("batch creation failed")
ErrInsufficientFunds = errors.New("insufficient token balance")
diff --git a/pkg/postage/stamper.go b/pkg/postage/stamper.go
index 5f32fa8ecd3..1ee16a8a133 100644
--- a/pkg/postage/stamper.go
+++ b/pkg/postage/stamper.go
@@ -11,12 +11,12 @@ import (
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
+ "resenje.org/multex"
)
var (
// ErrBucketFull is the error when a collision bucket is full.
- ErrBucketFull = errors.New("bucket full")
- ErrOverwriteImmutableIndex = errors.New("immutable batch old index overwrite due to previous faulty save")
+ ErrBucketFull = errors.New("bucket full")
)
// Stamper can issue stamps from the given address of chunk.
@@ -30,31 +30,30 @@ type stamper struct {
store storage.Store
issuer *StampIssuer
signer crypto.Signer
+ mu *multex.Multex
}
// NewStamper constructs a Stamper.
func NewStamper(store storage.Store, issuer *StampIssuer, signer crypto.Signer) Stamper {
- return &stamper{store, issuer, signer}
+ return &stamper{store, issuer, signer, multex.New()}
}
-// Stamp takes chunk, see if the chunk can included in the batch and
+// Stamp takes a chunk, sees if the chunk can be included in the batch and
// signs it with the owner of the batch of this Stamp issuer.
func (st *stamper) Stamp(addr swarm.Address) (*Stamp, error) {
+ st.mu.Lock(addr.ByteString())
+ defer st.mu.Unlock(addr.ByteString())
+
item := &StampItem{
BatchID: st.issuer.data.BatchID,
chunkAddress: addr,
}
switch err := st.store.Get(item); {
case err == nil:
- // The index should be in the past. It could happen that we encountered
- // some error after assigning this index and did not save the issuer data. In
- // this case we should assign a new index and update it.
- if st.issuer.assigned(item.BatchIndex) {
- break
- } else if st.issuer.ImmutableFlag() {
- return nil, ErrOverwriteImmutableIndex
+ item.BatchTimestamp = unixTime()
+ if err = st.store.Put(item); err != nil {
+ return nil, err
}
- fallthrough
case errors.Is(err, storage.ErrNotFound):
item.BatchIndex, item.BatchTimestamp, err = st.issuer.increment(addr)
if err != nil {
diff --git a/pkg/postage/stamper_test.go b/pkg/postage/stamper_test.go
index 1fc3b3d5c34..520d6da0faf 100644
--- a/pkg/postage/stamper_test.go
+++ b/pkg/postage/stamper_test.go
@@ -106,11 +106,11 @@ func TestStamperStamping(t *testing.T) {
}
})
- t.Run("incorrect old index", func(t *testing.T) {
+ t.Run("reuse index but get new timestamp for mutable or immutable batch", func(t *testing.T) {
st := newTestStampIssuerMutability(t, 1000, false)
chunkAddr := swarm.RandAddress(t)
bIdx := postage.ToBucket(st.BucketDepth(), chunkAddr)
- index := postage.IndexToBytes(bIdx, 100)
+ index := postage.IndexToBytes(bIdx, 4)
testItem := postage.NewStampItem().
WithBatchID(st.ID()).
WithChunkAddress(chunkAddr).
@@ -121,28 +121,16 @@ func TestStamperStamping(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := stamp.Valid(chunkAddr, owner, 12, 8, true); err != nil {
- t.Fatalf("expected no error, got %v", err)
- }
- if bytes.Equal(stamp.Index(), testItem.BatchIndex) {
- t.Fatalf("expected index to be different, got %x", stamp.Index())
- }
- })
-
- t.Run("incorrect old index immutable", func(t *testing.T) {
- st := newTestStampIssuerMutability(t, 1000, true)
- chunkAddr := swarm.RandAddress(t)
- bIdx := postage.ToBucket(st.BucketDepth(), chunkAddr)
- index := postage.IndexToBytes(bIdx, 100)
- testItem := postage.NewStampItem().
- WithBatchID(st.ID()).
- WithChunkAddress(chunkAddr).
- WithBatchIndex(index)
- testSt := &testStore{Store: inmemstore.New(), stampItem: testItem}
- stamper := postage.NewStamper(testSt, st, signer)
- _, err := stamper.Stamp(chunkAddr)
- if !errors.Is(err, postage.ErrOverwriteImmutableIndex) {
- t.Fatalf("got err %v, wanted %v", err, postage.ErrOverwriteImmutableIndex)
+ for _, mutability := range []bool{true, false} {
+ if err := stamp.Valid(chunkAddr, owner, 12, 8, mutability); err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+ if bytes.Equal(stamp.Timestamp(), testItem.BatchTimestamp) {
+ t.Fatalf("expected timestamp to be different, got %x", stamp.Timestamp())
+ }
+ if !bytes.Equal(stamp.Index(), testItem.BatchIndex) {
+ t.Fatalf("expected index to be the same, got %x", stamp.Index())
+ }
}
})
diff --git a/pkg/postage/stampissuer.go b/pkg/postage/stampissuer.go
index 2606feb36eb..84b5292528a 100644
--- a/pkg/postage/stampissuer.go
+++ b/pkg/postage/stampissuer.go
@@ -201,14 +201,6 @@ func (si *StampIssuer) increment(addr swarm.Address) (batchIndex []byte, batchTi
return indexToBytes(bIdx, bCnt), unixTime(), nil
}
-// check if this stamp index has already been assigned
-func (si *StampIssuer) assigned(stampIdx []byte) bool {
- si.bucketMtx.Lock()
- defer si.bucketMtx.Unlock()
- b, idx := BucketIndexFromBytes(stampIdx)
- return idx < si.data.Buckets[b]
-}
-
// Label returns the label of the issuer.
func (si *StampIssuer) Label() string {
return si.data.Label
diff --git a/pkg/intervalstore/intervals.go b/pkg/puller/intervalstore/intervals.go
similarity index 100%
rename from pkg/intervalstore/intervals.go
rename to pkg/puller/intervalstore/intervals.go
diff --git a/pkg/intervalstore/intervals_test.go b/pkg/puller/intervalstore/intervals_test.go
similarity index 100%
rename from pkg/intervalstore/intervals_test.go
rename to pkg/puller/intervalstore/intervals_test.go
diff --git a/pkg/intervalstore/main_test.go b/pkg/puller/intervalstore/main_test.go
similarity index 100%
rename from pkg/intervalstore/main_test.go
rename to pkg/puller/intervalstore/main_test.go
diff --git a/pkg/intervalstore/store_test.go b/pkg/puller/intervalstore/store_test.go
similarity index 100%
rename from pkg/intervalstore/store_test.go
rename to pkg/puller/intervalstore/store_test.go
diff --git a/pkg/puller/puller.go b/pkg/puller/puller.go
index 2992ef0a8c3..f2cfd15eee0 100644
--- a/pkg/puller/puller.go
+++ b/pkg/puller/puller.go
@@ -15,9 +15,9 @@ import (
"sync"
"time"
- "github.com/ethersphere/bee/pkg/intervalstore"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/p2p"
+ "github.com/ethersphere/bee/pkg/puller/intervalstore"
"github.com/ethersphere/bee/pkg/pullsync"
"github.com/ethersphere/bee/pkg/rate"
"github.com/ethersphere/bee/pkg/storage"
@@ -95,7 +95,7 @@ func New(
blockLister: blockLister,
rate: rate.New(DefaultHistRateWindow),
cancel: func() { /* Noop, since the context is initialized in the Start(). */ },
- limiter: ratelimit.NewLimiter(ratelimit.Every(time.Second/2), int(swarm.MaxBins)), // allows for 2 syncs per second, max bins bursts
+ limiter: ratelimit.NewLimiter(ratelimit.Every(time.Second/4), int(swarm.MaxBins)), // allows for 4 syncs per second, max bins bursts
}
return p
@@ -129,18 +129,15 @@ func (p *Puller) manage(ctx context.Context) {
p.syncPeersMtx.Lock()
defer p.syncPeersMtx.Unlock()
- // peersDisconnected is used to mark and prune peers that are no longer connected.
- peersDisconnected := make(map[string]*syncPeer)
- for _, peer := range p.syncPeers {
- peersDisconnected[peer.address.ByteString()] = peer
- }
-
newRadius := p.radius.StorageRadius()
// reset all intervals below the new radius to resync:
// 1. previously evicted chunks
// 2. previously ignored chunks due to a higher radius
if newRadius < prevRadius {
+ for _, peer := range p.syncPeers {
+ p.disconnectPeer(peer.address)
+ }
err := p.resetIntervals(prevRadius)
if err != nil {
p.logger.Debug("reset lower sync radius failed", "error", err)
@@ -148,6 +145,12 @@ func (p *Puller) manage(ctx context.Context) {
}
prevRadius = newRadius
+ // peersDisconnected is used to mark and prune peers that are no longer connected.
+ peersDisconnected := make(map[string]*syncPeer)
+ for _, peer := range p.syncPeers {
+ peersDisconnected[peer.address.ByteString()] = peer
+ }
+
_ = p.topology.EachConnectedPeerRev(func(addr swarm.Address, po uint8) (stop, jumpToNext bool, err error) {
if _, ok := p.syncPeers[addr.ByteString()]; !ok {
p.syncPeers[addr.ByteString()] = newSyncPeer(addr, p.bins, po)
@@ -187,7 +190,7 @@ func (p *Puller) disconnectPeer(addr swarm.Address) {
loggerV2.Debug("disconnecting peer", "peer_address", addr)
if peer, ok := p.syncPeers[addr.ByteString()]; ok {
peer.mtx.Lock()
- peer.gone()
+ peer.stop()
peer.mtx.Unlock()
}
delete(p.syncPeers, addr.ByteString())
@@ -229,7 +232,7 @@ func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uin
if storedEpoch != epoch {
// cancel all bins
- peer.gone()
+ peer.stop()
p.logger.Debug("peer epoch change detected, resetting past synced intervals", "stored_epoch", storedEpoch, "new_epoch", epoch, "peer_address", peer.address)
@@ -285,81 +288,97 @@ func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uin
// syncPeerBin will start historical and live syncing for the peer for a particular bin.
// Must be called under syncPeer lock.
-func (p *Puller) syncPeerBin(ctx context.Context, peer *syncPeer, bin uint8, cur uint64) {
- binCtx, cancel := context.WithCancel(ctx)
- peer.setBinCancel(cancel, bin)
- peer.wg.Add(1)
- p.wg.Add(1)
- go p.syncWorker(binCtx, peer.address, bin, cur, peer.wg.Done)
-}
-
-func (p *Puller) syncWorker(ctx context.Context, peer swarm.Address, bin uint8, cur uint64, done func()) {
+func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint8, cursor uint64) {
loggerV2 := p.logger.V(2).Register()
- p.metrics.SyncWorkerCounter.Inc()
- defer p.wg.Done()
- defer p.metrics.SyncWorkerDoneCounter.Inc()
- defer done()
+ ctx, cancel := context.WithCancel(parentCtx)
+ peer.setBinCancel(cancel, bin)
- loggerV2.Debug("syncWorker starting", "peer_address", peer, "bin", bin, "cursor", cur)
+ sync := func(isHistorical bool, address swarm.Address, start uint64, bin uint8, done func()) {
+ p.metrics.SyncWorkerCounter.Inc()
- for {
+ defer p.wg.Done()
+ defer p.metrics.SyncWorkerDoneCounter.Inc()
+ defer done()
- s, _, _, err := p.nextPeerInterval(peer, bin)
- if err != nil {
- p.metrics.SyncWorkerErrCounter.Inc()
- p.logger.Error(err, "syncWorker nextPeerInterval failed, quitting")
- return
- }
-
- // rate limit historical syncing
- if s <= cur {
- _ = p.limiter.Wait(ctx)
- }
+ var (
+ cursor = start
+ err error
+ )
- select {
- case <-ctx.Done():
- loggerV2.Debug("syncWorker context cancelled", "peer_address", peer, "bin", bin)
- return
- default:
- }
+ for {
+ if isHistorical { // override start with the next interval if historical syncing
+ start, err = p.nextPeerInterval(address, bin)
+ if err != nil {
+ p.metrics.SyncWorkerErrCounter.Inc()
+ p.logger.Error(err, "syncWorker nextPeerInterval failed, quitting")
+ return
+ }
+
+ // historical sync has caught up to the cursor, exit
+ if start > cursor {
+ return
+ }
+ // rate limit historical syncing
+ _ = p.limiter.Wait(ctx)
+ }
- p.metrics.SyncWorkerIterCounter.Inc()
+ select {
+ case <-ctx.Done():
+ loggerV2.Debug("syncWorker context cancelled", "peer_address", address, "bin", bin)
+ return
+ default:
+ }
- syncStart := time.Now()
- top, count, err := p.syncer.Sync(ctx, peer, bin, s)
+ p.metrics.SyncWorkerIterCounter.Inc()
- if top == math.MaxUint64 {
- p.metrics.MaxUintErrCounter.Inc()
- p.logger.Error(nil, "syncWorker max uint64 encountered, quitting", "peer_address", peer, "bin", bin, "from", s, "topmost", top)
- return
- }
+ syncStart := time.Now()
+ top, count, err := p.syncer.Sync(ctx, address, bin, start)
- if top <= cur {
- p.metrics.SyncedCounter.WithLabelValues("historical").Add(float64(count))
- p.rate.Add(count)
- } else {
- p.metrics.SyncedCounter.WithLabelValues("live").Add(float64(count))
- }
+ if top == math.MaxUint64 {
+ p.metrics.MaxUintErrCounter.Inc()
+ p.logger.Error(nil, "syncWorker max uint64 encountered, quitting", "peer_address", address, "bin", bin, "from", start, "topmost", top)
+ return
+ }
- if top >= s {
- if err := p.addPeerInterval(peer, bin, s, top); err != nil {
+ if err != nil {
p.metrics.SyncWorkerErrCounter.Inc()
- p.logger.Error(err, "syncWorker could not persist interval for peer, quitting", "peer_address", peer)
- return
+ if errors.Is(err, p2p.ErrPeerNotFound) {
+ p.logger.Debug("syncWorker interval failed, quitting", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
+ return
+ }
+ loggerV2.Debug("syncWorker interval failed", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
}
- loggerV2.Debug("syncWorker pulled", "bin", bin, "start", s, "topmost", top, "duration", time.Since(syncStart), "peer_address", peer)
- }
- if err != nil {
- p.metrics.SyncWorkerErrCounter.Inc()
- if errors.Is(err, p2p.ErrPeerNotFound) {
- p.logger.Debug("syncWorker interval failed, quitting", "error", err, "peer_address", peer, "bin", bin, "cursor", cur, "start", s, "topmost", top)
- return
+ if isHistorical {
+ p.metrics.SyncedCounter.WithLabelValues("historical").Add(float64(count))
+ p.rate.Add(count)
+ } else {
+ p.metrics.SyncedCounter.WithLabelValues("live").Add(float64(count))
+ }
+
+ // pulled at least one chunk
+ if top >= start {
+ if err := p.addPeerInterval(address, bin, start, top); err != nil {
+ p.metrics.SyncWorkerErrCounter.Inc()
+ p.logger.Error(err, "syncWorker could not persist interval for peer, quitting", "peer_address", address)
+ return
+ }
+ loggerV2.Debug("syncWorker pulled", "bin", bin, "start", start, "topmost", top, "isHistorical", isHistorical, "duration", time.Since(syncStart), "peer_address", address)
+ start = top + 1
}
- p.logger.Debug("syncWorker interval failed", "error", err, "peer_address", peer, "bin", bin, "cursor", cur, "start", s, "topmost", top)
}
}
+
+ if cursor > 0 {
+ peer.wg.Add(1)
+ p.wg.Add(1)
+ go sync(true, peer.address, cursor, bin, peer.wg.Done)
+ }
+
+ peer.wg.Add(1)
+ p.wg.Add(1)
+ go sync(false, peer.address, cursor+1, bin, peer.wg.Done)
}
func (p *Puller) Close() error {
@@ -443,17 +462,17 @@ func (p *Puller) resetIntervals(upto uint8) (err error) {
return
}
-func (p *Puller) nextPeerInterval(peer swarm.Address, bin uint8) (start, end uint64, empty bool, err error) {
+func (p *Puller) nextPeerInterval(peer swarm.Address, bin uint8) (uint64, error) {
p.intervalMtx.Lock()
defer p.intervalMtx.Unlock()
i, err := p.getOrCreateInterval(peer, bin)
if err != nil {
- return 0, 0, false, err
+ return 0, err
}
- start, end, empty = i.Next(0)
- return start, end, empty, nil
+ start, _, _ := i.Next(0)
+ return start, nil
}
// Must be called underlock.
@@ -506,7 +525,7 @@ func newSyncPeer(addr swarm.Address, bins, po uint8) *syncPeer {
}
// called when peer disconnects or on shutdown, cleans up ongoing sync operations
-func (p *syncPeer) gone() {
+func (p *syncPeer) stop() {
for bin, c := range p.binCancelFuncs {
c()
delete(p.binCancelFuncs, bin)
diff --git a/pkg/puller/puller_test.go b/pkg/puller/puller_test.go
index 339e492f488..f76fe292432 100644
--- a/pkg/puller/puller_test.go
+++ b/pkg/puller/puller_test.go
@@ -11,9 +11,9 @@ import (
"testing"
"time"
- "github.com/ethersphere/bee/pkg/intervalstore"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/puller"
+ "github.com/ethersphere/bee/pkg/puller/intervalstore"
mockps "github.com/ethersphere/bee/pkg/pullsync/mock"
"github.com/ethersphere/bee/pkg/spinlock"
"github.com/ethersphere/bee/pkg/statestore/mock"
@@ -438,11 +438,12 @@ func TestContinueSyncing(t *testing.T) {
time.Sleep(100 * time.Millisecond)
kad.Trigger()
- time.Sleep(time.Second)
- calls := len(pullsync.SyncCalls(addr))
- if calls != 1 {
- t.Fatalf("unexpected amount of calls, got %d", calls)
+ err := spinlock.Wait(time.Second, func() bool {
+ return len(pullsync.SyncCalls(addr)) == 1
+ })
+ if err != nil {
+ t.Fatal(err)
}
}
diff --git a/pkg/pusher/pusher.go b/pkg/pusher/pusher.go
index 5e38137979f..457af2f58a3 100644
--- a/pkg/pusher/pusher.go
+++ b/pkg/pusher/pusher.go
@@ -25,6 +25,9 @@ import (
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
)
// loggerName is the tree path name of the logger for this package.
@@ -34,6 +37,7 @@ type Op struct {
Chunk swarm.Chunk
Err chan error
Direct bool
+ Span opentracing.Span
}
type OpChan <-chan *Op
@@ -57,6 +61,7 @@ type Service struct {
inflight *inflight
attempts *attempts
smuggler chan OpChan
+ tracer *tracing.Tracer
}
const (
@@ -94,6 +99,7 @@ func New(
inflight: newInflight(),
attempts: &attempts{retryCount: retryCount, attempts: make(map[string]int)},
smuggler: make(chan OpChan),
+ tracer: tracer,
}
go p.chunksWorker(warmupTime, tracer)
return p
@@ -118,28 +124,20 @@ func (s *Service) chunksWorker(warmupTime time.Duration, tracer *tracing.Tracer)
}
var (
- cctx, cancel = context.WithCancel(context.Background())
- mtx sync.Mutex
- wg sync.WaitGroup
- span, logger, ctx = tracer.StartSpanFromContext(cctx, "pusher-sync-batch", s.logger)
- timer = time.NewTimer(traceDuration)
- sem = make(chan struct{}, ConcurrentPushes)
- cc = make(chan *Op)
+ ctx, cancel = context.WithCancel(context.Background())
+ sem = make(chan struct{}, ConcurrentPushes)
+ cc = make(chan *Op)
)
// inflight.set handles the backpressure for the maximum amount of inflight chunks
// and duplicate handling.
- chunks, unsubscribe := s.storer.SubscribePush(cctx)
+ chunks, unsubscribe := s.storer.SubscribePush(ctx)
defer func() {
unsubscribe()
cancel()
}()
- ctxLogger := func() (context.Context, log.Logger) {
- mtx.Lock()
- defer mtx.Unlock()
- return ctx, logger
- }
+ var wg sync.WaitGroup
push := func(op *Op) {
var (
@@ -168,39 +166,33 @@ func (s *Service) chunksWorker(warmupTime time.Duration, tracer *tracing.Tracer)
}()
s.metrics.TotalToPush.Inc()
- ctx, logger := ctxLogger()
startTime := time.Now()
+ spanCtx := ctx
+ if op.Span != nil {
+ spanCtx = tracing.WithContext(spanCtx, op.Span.Context())
+ } else {
+ op.Span = opentracing.NoopTracer{}.StartSpan("noOp")
+ }
+
if op.Direct {
- err = s.pushDirect(ctx, logger, op)
+ err = s.pushDirect(spanCtx, s.logger, op)
} else {
- doRepeat, err = s.pushDeferred(ctx, logger, op)
+ doRepeat, err = s.pushDeferred(spanCtx, s.logger, op)
}
if err != nil {
s.metrics.TotalErrors.Inc()
s.metrics.ErrorTime.Observe(time.Since(startTime).Seconds())
+ ext.LogError(op.Span, err)
+ } else {
+ op.Span.LogFields(olog.Bool("success", true))
}
s.metrics.SyncTime.Observe(time.Since(startTime).Seconds())
s.metrics.TotalSynced.Inc()
}
- go func() {
- for {
- select {
- case <-s.quit:
- return
- case <-timer.C:
- // reset the span
- mtx.Lock()
- span.Finish()
- span, logger, ctx = tracer.StartSpanFromContext(cctx, "pusher-sync-batch", s.logger)
- mtx.Unlock()
- }
- }
- }()
-
go func() {
for {
select {
@@ -393,6 +385,7 @@ func (s *Service) AddFeed(c <-chan *Op) {
go func() {
select {
case s.smuggler <- c:
+ s.logger.Info("got a chunk being smuggled")
case <-s.quit:
}
}()
diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go
index 08832c3f110..8f1597384a6 100644
--- a/pkg/pushsync/pushsync.go
+++ b/pkg/pushsync/pushsync.go
@@ -28,6 +28,8 @@ import (
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
)
// loggerName is the tree path name of the logger for this package.
@@ -186,8 +188,27 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
chunkAddress := chunk.Address()
- span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
- defer span.Finish()
+ span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()}, opentracing.Tag{Key: "tagID", Value: chunk.TagID()}, opentracing.Tag{Key: "sender_address", Value: p.Address.String()})
+
+ var (
+ stored bool
+ reason string
+ )
+
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ var logs []olog.Field
+ logs = append(logs, olog.Bool("success", true))
+ if stored {
+ logs = append(logs, olog.Bool("stored", true))
+ logs = append(logs, olog.String("reason", reason))
+ }
+ span.LogFields(logs...)
+ }
+ span.Finish()
+ }()
stamp := new(postage.Stamp)
err = stamp.UnmarshalBinary(ch.Stamp)
@@ -240,12 +261,14 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
}
if ps.topologyDriver.IsReachable() && ps.store.IsWithinStorageRadius(chunkAddress) {
+ stored, reason = true, "is within AOR"
return store(ctx)
}
receipt, err := ps.pushToClosest(ctx, chunk, false)
if err != nil {
if errors.Is(err, topology.ErrWantSelf) {
+ stored, reason = true, "want self"
return store(ctx)
}
@@ -437,22 +460,25 @@ func (ps *PushSync) closestPeer(chunkAddress swarm.Address, origin bool) (swarm.
}
func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
-
- span := tracing.FromContext(parentCtx)
-
ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
defer cancel()
- spanInner, _, ctx := ps.tracer.StartSpanFromContext(tracing.WithContext(ctx, span), "push-closest", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
- defer spanInner.Finish()
-
var (
err error
receipt *pb.Receipt
- now = time.Now()
)
+ now := time.Now()
+
+ spanInner, _, _ := ps.tracer.FollowSpanFromContext(context.WithoutCancel(parentCtx), "push-chunk-async", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
+
defer func() {
+ if err != nil {
+ ext.LogError(spanInner, err)
+ } else {
+ spanInner.LogFields(olog.Bool("success", true))
+ }
+ spanInner.Finish()
select {
case resultChan <- receiptResult{pushTime: now, peer: peer, err: err, receipt: receipt}:
case <-parentCtx.Done():
@@ -461,7 +487,9 @@ func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptRes
defer action.Cleanup()
- receipt, err = ps.pushChunkToPeer(ctx, peer, ch)
+ spanInner.LogFields(olog.String("peer_address", peer.String()))
+
+ receipt, err = ps.pushChunkToPeer(tracing.WithContext(ctx, spanInner.Context()), peer, ch)
if err != nil {
return
}
diff --git a/pkg/resolver/client/ens/ens_integration_test.go b/pkg/resolver/client/ens/ens_integration_test.go
index 2f38740e32b..139347c2db8 100644
--- a/pkg/resolver/client/ens/ens_integration_test.go
+++ b/pkg/resolver/client/ens/ens_integration_test.go
@@ -16,7 +16,7 @@ import (
func TestENSIntegration(t *testing.T) {
// TODO: consider using a stable gateway instead of INFURA.
- defaultEndpoint := "https://goerli.infura.io/v3/59d83a5a4be74f86b9851190c802297b"
+ defaultEndpoint := "https://sepolia.infura.io/v3/59d83a5a4be74f86b9851190c802297b"
defaultAddr := swarm.MustParseHexAddress("00cb23598c2e520b6a6aae3ddc94fed4435a2909690bdd709bf9d9e7c2aadfad")
testCases := []struct {
diff --git a/pkg/retrieval/retrieval.go b/pkg/retrieval/retrieval.go
index 6b5a037e248..cae78558df9 100644
--- a/pkg/retrieval/retrieval.go
+++ b/pkg/retrieval/retrieval.go
@@ -28,6 +28,8 @@ import (
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
"resenje.org/singleflight"
)
@@ -152,6 +154,8 @@ func (s *Service) RetrieveChunk(ctx context.Context, chunkAddr, sourcePeerAddr s
s.metrics.RequestAttempts.Observe(float64(totalRetrieveAttempts))
}()
+ spanCtx := context.WithoutCancel(ctx)
+
v, _, err := s.singleflight.Do(ctx, flightRoute, func(ctx context.Context) (swarm.Chunk, error) {
skip := skippeers.NewList()
@@ -257,10 +261,9 @@ func (s *Service) RetrieveChunk(ctx context.Context, chunkAddr, sourcePeerAddr s
inflight++
go func() {
- ctx := tracing.WithContext(context.Background(), tracing.FromContext(ctx)) // todo: replace with `ctx := context.WithoutCancel(ctx)` when go 1.21 is supported to pass all context values
- span, _, ctx := s.tracer.StartSpanFromContext(ctx, "retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: chunkAddr.String()})
+ span, _, ctx := s.tracer.FollowSpanFromContext(spanCtx, "retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: chunkAddr.String()})
defer span.Finish()
- s.retrieveChunk(ctx, quit, chunkAddr, peer, resultC, action, origin)
+ s.retrieveChunk(ctx, quit, chunkAddr, peer, resultC, action, origin, span)
}()
case res := <-resultC:
@@ -294,7 +297,7 @@ func (s *Service) RetrieveChunk(ctx context.Context, chunkAddr, sourcePeerAddr s
return v, nil
}
-func (s *Service) retrieveChunk(ctx context.Context, quit chan struct{}, chunkAddr, peer swarm.Address, result chan retrievalResult, action accounting.Action, isOrigin bool) {
+func (s *Service) retrieveChunk(ctx context.Context, quit chan struct{}, chunkAddr, peer swarm.Address, result chan retrievalResult, action accounting.Action, isOrigin bool, span opentracing.Span) {
var (
startTime = time.Now()
@@ -305,7 +308,10 @@ func (s *Service) retrieveChunk(ctx context.Context, quit chan struct{}, chunkAd
defer func() {
action.Cleanup()
if err != nil {
+ ext.LogError(span, err)
s.metrics.TotalErrors.Inc()
+ } else {
+ span.LogFields(olog.Bool("success", true))
}
select {
case result <- retrievalResult{err: err, chunk: chunk, peer: peer}:
@@ -446,10 +452,19 @@ func (s *Service) handler(p2pctx context.Context, p p2p.Peer, stream p2p.Stream)
return fmt.Errorf("invalid address queried by peer %s", p.Address.String())
}
+ var forwarded bool
+
span, _, ctx := s.tracer.StartSpanFromContext(ctx, "handle-retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: addr.String()})
- defer span.Finish()
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ span.LogFields(olog.Bool("success", true))
+ }
+ span.LogFields(olog.Bool("forwarded", forwarded))
+ span.Finish()
+ }()
- forwarded := false
chunk, err := s.storer.Lookup().Get(ctx, addr)
if err != nil {
if errors.Is(err, storage.ErrNotFound) {
diff --git a/pkg/settlement/swap/chequebook/cashout_test.go b/pkg/settlement/swap/chequebook/cashout_test.go
index e435ce2f6ad..2aa3dc992c5 100644
--- a/pkg/settlement/swap/chequebook/cashout_test.go
+++ b/pkg/settlement/swap/chequebook/cashout_test.go
@@ -21,7 +21,7 @@ import (
)
var (
- chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_3_1)
+ chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_6_5)
chequeCashedEventType = chequebookABI.Events["ChequeCashed"]
chequeBouncedEventType = chequebookABI.Events["ChequeBounced"]
)
diff --git a/pkg/settlement/swap/chequebook/chequebook.go b/pkg/settlement/swap/chequebook/chequebook.go
index b6e47b4a632..75d8e7876ca 100644
--- a/pkg/settlement/swap/chequebook/chequebook.go
+++ b/pkg/settlement/swap/chequebook/chequebook.go
@@ -38,7 +38,7 @@ var (
// ErrInsufficientFunds is the error when the chequebook has not enough free funds for a user action
ErrInsufficientFunds = errors.New("insufficient token balance")
- chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_3_1)
+ chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_6_5)
chequeCashedEventType = chequebookABI.Events["ChequeCashed"]
chequeBouncedEventType = chequebookABI.Events["ChequeBounced"]
)
diff --git a/pkg/settlement/swap/chequebook/common_test.go b/pkg/settlement/swap/chequebook/common_test.go
index b2057272f11..ef3b6988c25 100644
--- a/pkg/settlement/swap/chequebook/common_test.go
+++ b/pkg/settlement/swap/chequebook/common_test.go
@@ -24,7 +24,6 @@ type factoryMock struct {
erc20Address func(ctx context.Context) (common.Address, error)
deploy func(ctx context.Context, issuer common.Address, defaultHardDepositTimeoutDuration *big.Int, nonce common.Hash) (common.Hash, error)
waitDeployed func(ctx context.Context, txHash common.Hash) (common.Address, error)
- verifyBytecode func(ctx context.Context) error
verifyChequebook func(ctx context.Context, chequebook common.Address) error
}
@@ -41,11 +40,6 @@ func (m *factoryMock) WaitDeployed(ctx context.Context, txHash common.Hash) (com
return m.waitDeployed(ctx, txHash)
}
-// VerifyBytecode checks that the factory is valid.
-func (m *factoryMock) VerifyBytecode(ctx context.Context) error {
- return m.verifyBytecode(ctx)
-}
-
// VerifyChequebook checks that the supplied chequebook has been deployed by this factory.
func (m *factoryMock) VerifyChequebook(ctx context.Context, chequebook common.Address) error {
return m.verifyChequebook(ctx, chequebook)
diff --git a/pkg/settlement/swap/chequebook/factory.go b/pkg/settlement/swap/chequebook/factory.go
index 41d029b23bc..0f8bf3aea2f 100644
--- a/pkg/settlement/swap/chequebook/factory.go
+++ b/pkg/settlement/swap/chequebook/factory.go
@@ -5,7 +5,6 @@
package chequebook
import (
- "bytes"
"errors"
"fmt"
"math/big"
@@ -24,7 +23,7 @@ var (
ErrNotDeployedByFactory = errors.New("chequebook not deployed by factory")
errDecodeABI = errors.New("could not decode abi data")
- factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_4_0)
+ factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_6_5)
simpleSwapDeployedEventType = factoryABI.Events["SimpleSwapDeployed"]
)
@@ -36,8 +35,6 @@ type Factory interface {
Deploy(ctx context.Context, issuer common.Address, defaultHardDepositTimeoutDuration *big.Int, nonce common.Hash) (common.Hash, error)
// WaitDeployed waits for the deployment transaction to confirm and returns the chequebook address
WaitDeployed(ctx context.Context, txHash common.Hash) (common.Address, error)
- // VerifyBytecode checks that the factory is valid.
- VerifyBytecode(ctx context.Context) error
// VerifyChequebook checks that the supplied chequebook has been deployed by this factory.
VerifyChequebook(ctx context.Context, chequebook common.Address) error
}
@@ -45,30 +42,19 @@ type Factory interface {
type factory struct {
backend transaction.Backend
transactionService transaction.Service
- address common.Address // address of the factory to use for deployments
- legacyAddresses []common.Address // addresses of old factories which were allowed for deployment
+ address common.Address // address of the factory to use for deployments
}
type simpleSwapDeployedEvent struct {
ContractAddress common.Address
}
-// the bytecode of factories which can be used for deployment
-var currentDeployVersion []byte = common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_4_0)
-
-// the bytecode of factories from which we accept chequebooks
-var supportedVersions = [][]byte{
- currentDeployVersion,
- common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_3_1),
-}
-
// NewFactory creates a new factory service for the provided factory contract.
-func NewFactory(backend transaction.Backend, transactionService transaction.Service, address common.Address, legacyAddresses []common.Address) Factory {
+func NewFactory(backend transaction.Backend, transactionService transaction.Service, address common.Address) Factory {
return &factory{
backend: backend,
transactionService: transactionService,
address: address,
- legacyAddresses: legacyAddresses,
}
}
@@ -112,36 +98,6 @@ func (c *factory) WaitDeployed(ctx context.Context, txHash common.Hash) (common.
return event.ContractAddress, nil
}
-// VerifyBytecode checks that the factory is valid.
-func (c *factory) VerifyBytecode(ctx context.Context) (err error) {
- code, err := c.backend.CodeAt(ctx, c.address, nil)
- if err != nil {
- return err
- }
-
- if !bytes.Equal(code, currentDeployVersion) {
- return ErrInvalidFactory
- }
-
-LOOP:
- for _, factoryAddress := range c.legacyAddresses {
- code, err := c.backend.CodeAt(ctx, factoryAddress, nil)
- if err != nil {
- return err
- }
-
- for _, referenceCode := range supportedVersions {
- if bytes.Equal(code, referenceCode) {
- continue LOOP
- }
- }
-
- return fmt.Errorf("failed to find matching bytecode for factory %x: %w", factoryAddress, ErrInvalidFactory)
- }
-
- return nil
-}
-
func (c *factory) verifyChequebookAgainstFactory(ctx context.Context, factory, chequebook common.Address) (bool, error) {
callData, err := factoryABI.Pack("deployedContracts", chequebook)
if err != nil {
@@ -184,17 +140,6 @@ func (c *factory) VerifyChequebook(ctx context.Context, chequebook common.Addres
if deployed {
return nil
}
-
- for _, factoryAddress := range c.legacyAddresses {
- deployed, err := c.verifyChequebookAgainstFactory(ctx, factoryAddress, chequebook)
- if err != nil {
- return err
- }
- if deployed {
- return nil
- }
- }
-
return ErrNotDeployedByFactory
}
diff --git a/pkg/settlement/swap/chequebook/factory_test.go b/pkg/settlement/swap/chequebook/factory_test.go
index 0f65f3f0e4c..d7223330ea4 100644
--- a/pkg/settlement/swap/chequebook/factory_test.go
+++ b/pkg/settlement/swap/chequebook/factory_test.go
@@ -7,7 +7,6 @@ package chequebook_test
import (
"context"
"errors"
- "fmt"
"math/big"
"testing"
@@ -22,7 +21,7 @@ import (
)
var (
- factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_4_0)
+ factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_6_5)
simpleSwapDeployedEvent = factoryABI.Events["SimpleSwapDeployed"]
)
@@ -31,19 +30,14 @@ func TestFactoryERC20Address(t *testing.T) {
factoryAddress := common.HexToAddress("0xabcd")
erc20Address := common.HexToAddress("0xeffff")
- factory := chequebook.NewFactory(
- backendmock.New(),
- transactionmock.New(
- transactionmock.WithABICall(
- &factoryABI,
- factoryAddress,
- common.BytesToHash(erc20Address.Bytes()).Bytes(),
- "ERC20Address",
- ),
+ factory := chequebook.NewFactory(backendmock.New(), transactionmock.New(
+ transactionmock.WithABICall(
+ &factoryABI,
+ factoryAddress,
+ common.BytesToHash(erc20Address.Bytes()).Bytes(),
+ "ERC20Address",
),
- factoryAddress,
- nil,
- )
+ ), factoryAddress)
addr, err := factory.ERC20Address(context.Background())
if err != nil {
@@ -55,204 +49,29 @@ func TestFactoryERC20Address(t *testing.T) {
}
}
-func backendWithCodeAt(codeMap map[common.Address]string) transaction.Backend {
- return backendmock.New(
- backendmock.WithCodeAtFunc(func(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
- code, ok := codeMap[contract]
- if !ok {
- return nil, fmt.Errorf("called with wrong address. wanted one of %v, got %x", codeMap, contract)
- }
- if blockNumber != nil {
- return nil, errors.New("not called for latest block")
- }
- return common.FromHex(code), nil
- }),
- )
-}
-
-func TestFactoryVerifySelf(t *testing.T) {
- t.Parallel()
-
- factoryAddress := common.HexToAddress("0xabcd")
- legacyFactory1 := common.HexToAddress("0xbbbb")
- legacyFactory2 := common.HexToAddress("0xcccc")
-
- t.Run("valid", func(t *testing.T) {
- t.Parallel()
-
- factory := chequebook.NewFactory(
- backendWithCodeAt(map[common.Address]string{
- factoryAddress: sw3abi.SimpleSwapFactoryDeployedBinv0_4_0,
- legacyFactory1: sw3abi.SimpleSwapFactoryDeployedBinv0_3_1,
- legacyFactory2: sw3abi.SimpleSwapFactoryDeployedBinv0_3_1,
- }),
- transactionmock.New(),
- factoryAddress,
- []common.Address{legacyFactory1, legacyFactory2},
- )
-
- err := factory.VerifyBytecode(context.Background())
- if err != nil {
- t.Fatal(err)
- }
- })
-
- t.Run("invalid deploy factory", func(t *testing.T) {
- t.Parallel()
-
- factory := chequebook.NewFactory(
- backendWithCodeAt(map[common.Address]string{
- factoryAddress: "abcd",
- }),
- transactionmock.New(),
- factoryAddress,
- nil,
- )
-
- err := factory.VerifyBytecode(context.Background())
- if err == nil {
- t.Fatal("verified invalid factory")
- }
- if !errors.Is(err, chequebook.ErrInvalidFactory) {
- t.Fatalf("wrong error. wanted %v, got %v", chequebook.ErrInvalidFactory, err)
- }
- })
-
- t.Run("invalid legacy factories", func(t *testing.T) {
- t.Parallel()
-
- factory := chequebook.NewFactory(
- backendWithCodeAt(map[common.Address]string{
- factoryAddress: sw3abi.SimpleSwapFactoryDeployedBinv0_4_0,
- legacyFactory1: sw3abi.SimpleSwapFactoryDeployedBinv0_3_1,
- legacyFactory2: "abcd",
- }),
- transactionmock.New(),
- factoryAddress,
- []common.Address{legacyFactory1, legacyFactory2},
- )
-
- err := factory.VerifyBytecode(context.Background())
- if err == nil {
- t.Fatal("verified invalid factory")
- }
- if !errors.Is(err, chequebook.ErrInvalidFactory) {
- t.Fatalf("wrong error. wanted %v, got %v", chequebook.ErrInvalidFactory, err)
- }
- })
-}
-
func TestFactoryVerifyChequebook(t *testing.T) {
t.Parallel()
factoryAddress := common.HexToAddress("0xabcd")
chequebookAddress := common.HexToAddress("0xefff")
- legacyFactory1 := common.HexToAddress("0xbbbb")
- legacyFactory2 := common.HexToAddress("0xcccc")
t.Run("valid", func(t *testing.T) {
t.Parallel()
- factory := chequebook.NewFactory(
- backendmock.New(),
- transactionmock.New(
- transactionmock.WithABICall(
- &factoryABI,
- factoryAddress,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
- "deployedContracts",
- chequebookAddress,
- ),
+ factory := chequebook.NewFactory(backendmock.New(), transactionmock.New(
+ transactionmock.WithABICall(
+ &factoryABI,
+ factoryAddress,
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
+ "deployedContracts",
+ chequebookAddress,
),
- factoryAddress,
- []common.Address{legacyFactory1, legacyFactory2},
- )
+ ), factoryAddress)
err := factory.VerifyChequebook(context.Background(), chequebookAddress)
if err != nil {
t.Fatal(err)
}
})
-
- t.Run("valid legacy", func(t *testing.T) {
- t.Parallel()
-
- factory := chequebook.NewFactory(
- backendmock.New(),
- transactionmock.New(
- transactionmock.WithABICallSequence(
- transactionmock.ABICall(
- &factoryABI,
- factoryAddress,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
- "deployedContracts",
- chequebookAddress,
- ),
- transactionmock.ABICall(
- &factoryABI,
- legacyFactory1,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
- "deployedContracts",
- chequebookAddress,
- ),
- transactionmock.ABICall(
- &factoryABI,
- legacyFactory2,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
- "deployedContracts",
- chequebookAddress,
- ),
- )),
- factoryAddress,
- []common.Address{legacyFactory1, legacyFactory2},
- )
-
- err := factory.VerifyChequebook(context.Background(), chequebookAddress)
- if err != nil {
- t.Fatal(err)
- }
- })
-
- t.Run("invalid", func(t *testing.T) {
- t.Parallel()
-
- factory := chequebook.NewFactory(
- backendmock.New(),
- transactionmock.New(
- transactionmock.WithABICallSequence(
- transactionmock.ABICall(
- &factoryABI,
- factoryAddress,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
- "deployedContracts",
- chequebookAddress,
- ),
- transactionmock.ABICall(
- &factoryABI,
- legacyFactory1,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
- "deployedContracts",
- chequebookAddress,
- ),
- transactionmock.ABICall(
- &factoryABI,
- legacyFactory2,
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
- "deployedContracts",
- chequebookAddress,
- ),
- )),
- factoryAddress,
- []common.Address{legacyFactory1, legacyFactory2},
- )
-
- err := factory.VerifyChequebook(context.Background(), chequebookAddress)
- if err == nil {
- t.Fatal("verified invalid chequebook")
- }
- if !errors.Is(err, chequebook.ErrNotDeployedByFactory) {
- t.Fatalf("wrong error. wanted %v, got %v", chequebook.ErrNotDeployedByFactory, err)
- }
- })
}
func TestFactoryDeploy(t *testing.T) {
@@ -265,36 +84,31 @@ func TestFactoryDeploy(t *testing.T) {
deployAddress := common.HexToAddress("0xdddd")
nonce := common.HexToHash("eeff")
- factory := chequebook.NewFactory(
- backendmock.New(),
- transactionmock.New(
- transactionmock.WithABISend(&factoryABI, deployTransactionHash, factoryAddress, big.NewInt(0), "deploySimpleSwap", issuerAddress, defaultTimeout, nonce),
- transactionmock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
- if txHash != deployTransactionHash {
- t.Fatalf("waiting for wrong transaction. wanted %x, got %x", deployTransactionHash, txHash)
- }
- logData, err := simpleSwapDeployedEvent.Inputs.NonIndexed().Pack(deployAddress)
- if err != nil {
- t.Fatal(err)
- }
- return &types.Receipt{
- Status: 1,
- Logs: []*types.Log{
- {
- Data: logData,
- },
- {
- Address: factoryAddress,
- Topics: []common.Hash{simpleSwapDeployedEvent.ID},
- Data: logData,
- },
+ factory := chequebook.NewFactory(backendmock.New(), transactionmock.New(
+ transactionmock.WithABISend(&factoryABI, deployTransactionHash, factoryAddress, big.NewInt(0), "deploySimpleSwap", issuerAddress, defaultTimeout, nonce),
+ transactionmock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
+ if txHash != deployTransactionHash {
+ t.Fatalf("waiting for wrong transaction. wanted %x, got %x", deployTransactionHash, txHash)
+ }
+ logData, err := simpleSwapDeployedEvent.Inputs.NonIndexed().Pack(deployAddress)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return &types.Receipt{
+ Status: 1,
+ Logs: []*types.Log{
+ {
+ Data: logData,
},
- }, nil
- },
- )),
- factoryAddress,
- nil,
- )
+ {
+ Address: factoryAddress,
+ Topics: []common.Hash{simpleSwapDeployedEvent.ID},
+ Data: logData,
+ },
+ },
+ }, nil
+ },
+ )), factoryAddress)
txHash, err := factory.Deploy(context.Background(), issuerAddress, defaultTimeout, nonce)
if err != nil {
@@ -320,21 +134,16 @@ func TestFactoryDeployReverted(t *testing.T) {
factoryAddress := common.HexToAddress("0xabcd")
deployTransactionHash := common.HexToHash("0xffff")
- factory := chequebook.NewFactory(
- backendmock.New(),
- transactionmock.New(
- transactionmock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
- if txHash != deployTransactionHash {
- t.Fatalf("waiting for wrong transaction. wanted %x, got %x", deployTransactionHash, txHash)
- }
- return &types.Receipt{
- Status: 0,
- }, nil
- }),
- ),
- factoryAddress,
- nil,
- )
+ factory := chequebook.NewFactory(backendmock.New(), transactionmock.New(
+ transactionmock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
+ if txHash != deployTransactionHash {
+ t.Fatalf("waiting for wrong transaction. wanted %x, got %x", deployTransactionHash, txHash)
+ }
+ return &types.Receipt{
+ Status: 0,
+ }, nil
+ }),
+ ), factoryAddress)
_, err := factory.WaitDeployed(context.Background(), deployTransactionHash)
if err == nil {
diff --git a/pkg/settlement/swap/chequebook/init.go b/pkg/settlement/swap/chequebook/init.go
index b8b21f154bb..19bfc3c56b0 100644
--- a/pkg/settlement/swap/chequebook/init.go
+++ b/pkg/settlement/swap/chequebook/init.go
@@ -133,12 +133,6 @@ func Init(
) (chequebookService Service, err error) {
logger = logger.WithName(loggerName).Register()
- // verify that the supplied factory is valid
- err = chequebookFactory.VerifyBytecode(ctx)
- if err != nil {
- return nil, err
- }
-
var chequebookAddress common.Address
err = stateStore.Get(chequebookKey, &chequebookAddress)
if err != nil {
diff --git a/pkg/settlement/swap/erc20/erc20.go b/pkg/settlement/swap/erc20/erc20.go
index c7b76a424ae..510cc43769b 100644
--- a/pkg/settlement/swap/erc20/erc20.go
+++ b/pkg/settlement/swap/erc20/erc20.go
@@ -18,7 +18,7 @@ import (
)
var (
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_3_1)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
errDecodeABI = errors.New("could not decode abi data")
)
diff --git a/pkg/settlement/swap/erc20/erc20_test.go b/pkg/settlement/swap/erc20/erc20_test.go
index 56287aab5c1..a51652ccdd0 100644
--- a/pkg/settlement/swap/erc20/erc20_test.go
+++ b/pkg/settlement/swap/erc20/erc20_test.go
@@ -17,7 +17,7 @@ import (
)
var (
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_3_1)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
)
func TestBalanceOf(t *testing.T) {
diff --git a/pkg/settlement/swap/priceoracle/priceoracle.go b/pkg/settlement/swap/priceoracle/priceoracle.go
index f67dcd90083..06ab07d5e6c 100644
--- a/pkg/settlement/swap/priceoracle/priceoracle.go
+++ b/pkg/settlement/swap/priceoracle/priceoracle.go
@@ -47,7 +47,7 @@ type Service interface {
}
var (
- priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_1_0)
+ priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_2_0)
)
func New(logger log.Logger, priceOracleAddress common.Address, transactionService transaction.Service, timeDivisor int64) Service {
diff --git a/pkg/settlement/swap/priceoracle/priceoracle_test.go b/pkg/settlement/swap/priceoracle/priceoracle_test.go
index 720220ad099..2404d991058 100644
--- a/pkg/settlement/swap/priceoracle/priceoracle_test.go
+++ b/pkg/settlement/swap/priceoracle/priceoracle_test.go
@@ -18,7 +18,7 @@ import (
)
var (
- priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_1_0)
+ priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_2_0)
)
func TestExchangeGetPrice(t *testing.T) {
diff --git a/pkg/statestore/leveldb/export_test.go b/pkg/statestore/leveldb/export_test.go
deleted file mode 100644
index 75486dc5af2..00000000000
--- a/pkg/statestore/leveldb/export_test.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package leveldb
-
-var DbSchemaCurrent = dbSchemaCurrent
-
-func (s *Store) GetSchemaName() (string, error) {
- return s.getSchemaName()
-}
diff --git a/pkg/statestore/leveldb/leveldb.go b/pkg/statestore/leveldb/leveldb.go
index 56f2f9bc8c3..65d2cc471ac 100644
--- a/pkg/statestore/leveldb/leveldb.go
+++ b/pkg/statestore/leveldb/leveldb.go
@@ -25,8 +25,7 @@ import (
const loggerName = "leveldb"
var (
- _ storage.StateStorer = (*Store)(nil)
- _ storage.StateStorerCleaner = (*Store)(nil)
+ _ storage.StateStorer = (*Store)(nil)
)
// Store uses LevelDB to store values.
@@ -46,10 +45,6 @@ func NewInMemoryStateStore(l log.Logger) (*Store, error) {
logger: l.WithName(loggerName).Register(),
}
- if err := migrate(s); err != nil {
- return nil, err
- }
-
return s, nil
}
@@ -76,36 +71,9 @@ func NewStateStore(path string, l log.Logger) (*Store, error) {
logger: l,
}
- if err := migrate(s); err != nil {
- return nil, err
- }
-
return s, nil
}
-func migrate(s *Store) error {
- sn, err := s.getSchemaName()
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- _ = s.Close()
- return fmt.Errorf("get schema name: %w", err)
- }
- // new statestore - put schema key with current name
- if err := s.putSchemaName(dbSchemaCurrent); err != nil {
- _ = s.Close()
- return fmt.Errorf("put schema name: %w", err)
- }
- sn = dbSchemaCurrent
- }
-
- if err = s.migrate(sn); err != nil {
- _ = s.Close()
- return fmt.Errorf("migrate: %w", err)
- }
-
- return nil
-}
-
// Get retrieves a value of the requested key. If no results are found,
// storage.ErrNotFound will be returned.
func (s *Store) Get(key string, i interface{}) error {
@@ -161,21 +129,6 @@ func (s *Store) Iterate(prefix string, iterFunc storage.StateIterFunc) (err erro
return iter.Error()
}
-func (s *Store) getSchemaName() (string, error) {
- name, err := s.db.Get([]byte(dbSchemaKey), nil)
- if err != nil {
- if errors.Is(err, leveldb.ErrNotFound) {
- return "", storage.ErrNotFound
- }
- return "", err
- }
- return string(name), nil
-}
-
-func (s *Store) putSchemaName(val string) error {
- return s.db.Put([]byte(dbSchemaKey), []byte(val), nil)
-}
-
// Close releases the resources used by the store.
func (s *Store) Close() error {
return s.db.Close()
diff --git a/pkg/statestore/leveldb/leveldb_test.go b/pkg/statestore/leveldb/leveldb_test.go
index 790e1debc0b..2e18b9f6b61 100644
--- a/pkg/statestore/leveldb/leveldb_test.go
+++ b/pkg/statestore/leveldb/leveldb_test.go
@@ -43,24 +43,3 @@ func TestPersistentStateStore(t *testing.T) {
return store
})
}
-
-func TestGetSchemaName(t *testing.T) {
- dir := t.TempDir()
-
- store, err := leveldb.NewStateStore(dir, log.Noop)
- if err != nil {
- t.Fatal(err)
- }
- t.Cleanup(func() {
- if err := store.Close(); err != nil {
- t.Fatal(err)
- }
- })
- n, err := store.GetSchemaName() // expect current
- if err != nil {
- t.Fatal(err)
- }
- if n != leveldb.DbSchemaCurrent {
- t.Fatalf("wanted current db schema but got '%s'", n)
- }
-}
diff --git a/pkg/statestore/leveldb/migration.go b/pkg/statestore/leveldb/migration.go
deleted file mode 100644
index 9fb57e9e27e..00000000000
--- a/pkg/statestore/leveldb/migration.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// nolint: goheader
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see .
-
-package leveldb
-
-import (
- "errors"
- "fmt"
- "strings"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethersphere/bee/pkg/storage"
-)
-
-var (
- errMissingCurrentSchema = errors.New("could not find current db schema")
- errMissingTargetSchema = errors.New("could not find target db schema")
-)
-
-const (
- dbSchemaKey = "statestore_schema"
- dbSchemaGrace = "grace"
- dbSchemaDrain = "drain"
- dbSchemaCleanInterval = "clean-interval"
- dbSchemaNoStamp = "no-stamp"
- dbSchemaFlushBlock = "flushblock"
- dbSchemaSwapAddr = "swapaddr"
- dBSchemaKademliaMetrics = "kademlia-metrics"
- dBSchemaBatchStore = "batchstore"
- dBSchemaBatchStoreV2 = "batchstoreV2"
- dBSchemaBatchStoreV3 = "batchstoreV3"
- dBSchemaBatchStoreV4 = "batchstoreV4"
- dBSchemaInterval = "interval"
- dBSchemaClearAddressBook = "address-book"
- dBSResetInterval = "interval-reset"
- dBSchemaBatchStoreV5 = "batchstoreV5"
-)
-
-var (
- dbSchemaCurrent = dBSchemaBatchStoreV5
-)
-
-type migration struct {
- name string // name of the schema
- fn func(s *Store) error // the migration function that needs to be performed in order to get to the current schema name
-}
-
-// schemaMigrations contains an ordered list of the database schemes, that is
-// in order to run data migrations in the correct sequence
-var schemaMigrations = []migration{
- {name: dbSchemaGrace, fn: func(s *Store) error { return nil }},
- {name: dbSchemaDrain, fn: migrateGrace},
- {name: dbSchemaCleanInterval, fn: migrateGrace},
- {name: dbSchemaNoStamp, fn: migrateStamp},
- {name: dbSchemaFlushBlock, fn: migrateFB},
- {name: dbSchemaSwapAddr, fn: migrateSwap},
- {name: dBSchemaKademliaMetrics, fn: migrateKademliaMetrics},
- {name: dBSchemaBatchStore, fn: migrateBatchstore},
- {name: dBSchemaBatchStoreV2, fn: migrateBatchstoreV2},
- {name: dBSchemaBatchStoreV3, fn: migrateBatchstore},
- {name: dBSchemaBatchStoreV4, fn: migrateBatchstore},
- {name: dBSchemaInterval, fn: noOpMigration},
- {name: dBSchemaClearAddressBook, fn: clearAddressBook},
- {name: dBSResetInterval, fn: clearIntervals},
- {name: dBSchemaBatchStoreV5, fn: migrateBatchstore},
-}
-
-func migrateFB(s *Store) error {
- collectedKeys, err := collectKeys(s, "blocklist-")
- if err != nil {
- return err
- }
- return deleteKeys(s, collectedKeys)
-}
-
-func migrateBatchstoreV2(s *Store) error {
- for _, pfx := range []string{"batchstore_", "verified_overlay_"} {
- collectedKeys, err := collectKeys(s, pfx)
- if err != nil {
- return err
- }
- if err := deleteKeys(s, collectedKeys); err != nil {
- return err
- }
- }
- return nil
-}
-
-func noOpMigration(s *Store) error {
- return nil
-}
-
-func clearAddressBook(s *Store) error {
- collectedKeys, err := collectKeys(s, "addressbook_entry_")
- if err != nil {
- return err
- }
- return deleteKeys(s, collectedKeys)
-}
-
-func clearIntervals(s *Store) error {
- collectedKeys, err := collectKeys(s, "sync|")
- if err != nil {
- return err
- }
- return deleteKeys(s, collectedKeys)
-}
-
-func migrateBatchstore(s *Store) error {
- collectedKeys, err := collectKeys(s, "batchstore_")
- if err != nil {
- return err
- }
- return deleteKeys(s, collectedKeys)
-}
-
-func migrateStamp(s *Store) error {
- for _, pfx := range []string{"postage", "batchstore", "addressbook_entry_"} {
- collectedKeys, err := collectKeys(s, pfx)
- if err != nil {
- return err
- }
- if err := deleteKeys(s, collectedKeys); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func migrateGrace(s *Store) error {
- var collectedKeys []string
- mgfn := func(k, v []byte) (bool, error) {
- stk := string(k)
- if strings.Contains(stk, "|") &&
- len(k) > 32 &&
- !strings.Contains(stk, "swap") &&
- !strings.Contains(stk, "peer") {
- s.logger.Debug("found key designated to deletion", "key", k)
- collectedKeys = append(collectedKeys, stk)
- }
-
- return false, nil
- }
-
- _ = s.Iterate("", mgfn)
-
- for _, v := range collectedKeys {
- err := s.Delete(v)
- if err != nil {
- s.logger.Debug("error deleting key", "key", v)
- continue
- }
- s.logger.Debug("deleted key", "key", v)
- }
- s.logger.Debug("keys deleted", "count", len(collectedKeys))
-
- return nil
-}
-
-func migrateSwap(s *Store) error {
- migratePrefix := func(prefix string) error {
- keys, err := collectKeys(s, prefix)
- if err != nil {
- return err
- }
-
- for _, key := range keys {
- split := strings.SplitAfter(key, prefix)
- if len(split) != 2 {
- return errors.New("no peer in key")
- }
-
- if len(split[1]) != 20 {
- s.logger.Debug("skipping already migrated key", "key", key)
- continue
- }
-
- addr := common.BytesToAddress([]byte(split[1]))
- fixed := fmt.Sprintf("%s%x", prefix, addr)
-
- var val string
- if err = s.Get(fixed, &val); err == nil {
- s.logger.Debug("skipping duplicate key", "key", key)
- if err = s.Delete(key); err != nil {
- return err
- }
- continue
- }
- if !errors.Is(err, storage.ErrNotFound) {
- return err
- }
-
- if err = s.Get(key, &val); err != nil {
- return err
- }
-
- if err = s.Put(fixed, val); err != nil {
- return err
- }
-
- if err = s.Delete(key); err != nil {
- return err
- }
- }
- return nil
- }
-
- if err := migratePrefix("swap_peer_chequebook_"); err != nil {
- return err
- }
-
- return migratePrefix("swap_beneficiary_peer_")
-}
-
-// migrateKademliaMetrics removes all old existing
-// kademlia metrics database content.
-func migrateKademliaMetrics(s *Store) error {
- for _, prefix := range []string{"peer-last-seen-timestamp", "peer-total-connection-duration"} {
- start := time.Now()
- s.logger.Debug("removing kademlia metrics", "metrics_prefix", prefix)
-
- keys, err := collectKeys(s, prefix)
- if err != nil {
- return err
- }
-
- if err := deleteKeys(s, keys); err != nil {
- return err
- }
-
- s.logger.Debug("removing kademlia metrics done", "metrics_prefix", prefix, "elapsed", time.Since(start))
- }
- return nil
-}
-
-func (s *Store) migrate(schemaName string) error {
- migrations, err := getMigrations(schemaName, dbSchemaCurrent, schemaMigrations, s)
- if err != nil {
- return fmt.Errorf("error getting migrations for current schema (%s): %w", schemaName, err)
- }
-
- // no migrations to run
- if migrations == nil {
- return nil
- }
-
- s.logger.Debug("statestore: need to run data migrations to schema", "migration_count", len(migrations), "schema_name", schemaName)
- for i := 0; i < len(migrations); i++ {
- err := migrations[i].fn(s)
- if err != nil {
- return err
- }
- err = s.putSchemaName(migrations[i].name) // put the name of the current schema
- if err != nil {
- return err
- }
- schemaName, err = s.getSchemaName()
- if err != nil {
- return err
- }
- s.logger.Debug("statestore: successfully ran migration", "migration_number", i, "schema_name", schemaName)
- }
- return nil
-}
-
-// getMigrations returns an ordered list of migrations that need be executed
-// with no errors in order to bring the statestore to the most up-to-date
-// schema definition
-func getMigrations(currentSchema, targetSchema string, allSchemeMigrations []migration, store *Store) (migrations []migration, err error) {
- foundCurrent := false
- foundTarget := false
- if currentSchema == dbSchemaCurrent {
- return nil, nil
- }
- for i, v := range allSchemeMigrations {
- switch v.name {
- case currentSchema:
- if foundCurrent {
- return nil, errors.New("found schema name for the second time when looking for migrations")
- }
- foundCurrent = true
- store.logger.Debug("statestore migration: migrating schema", "current_schema_name", currentSchema, "next_schema_name", dbSchemaCurrent, "total_migration_count", len(allSchemeMigrations)-i)
- continue // current schema migration should not be executed (already has been when schema was migrated to)
- case targetSchema:
- foundTarget = true
- }
- if foundCurrent {
- migrations = append(migrations, v)
- }
- }
- if !foundCurrent {
- return nil, errMissingCurrentSchema
- }
- if !foundTarget {
- return nil, errMissingTargetSchema
- }
- return migrations, nil
-}
-
-func collectKeysExcept(s *Store, prefixesToPreserve []string) (keys []string, err error) {
- if err := s.Iterate("", func(k, v []byte) (bool, error) {
- stk := string(k)
- has := false
- for _, v := range prefixesToPreserve {
- if strings.HasPrefix(stk, v) {
- has = true
- break
- }
- }
- if !has {
- keys = append(keys, stk)
- }
- return false, nil
- }); err != nil {
- return nil, err
- }
- return keys, nil
-}
-
-func collectKeys(s *Store, prefix string) (keys []string, err error) {
- if err := s.Iterate(prefix, func(k, v []byte) (bool, error) {
- stk := string(k)
- if strings.HasPrefix(stk, prefix) {
- keys = append(keys, stk)
- }
- return false, nil
- }); err != nil {
- return nil, err
- }
- return keys, nil
-}
-
-func deleteKeys(s *Store, keys []string) error {
- for _, v := range keys {
- err := s.Delete(v)
- if err != nil {
- return fmt.Errorf("error deleting key %s: %w", v, err)
- }
- }
- s.logger.Debug("keys deleted", "count", len(keys))
- return nil
-}
-
-// Nuke the store so that only the bare essential entries are
-// left. Careful!
-func (s *Store) Nuke() error {
- var (
- prefixesToPreserve = []string{
- "non-mineable-overlay",
- "overlayV2_nonce",
- "pseudosettle",
- "accounting",
- "swap",
- }
- keys []string
- err error
- )
-
- keys, err = collectKeysExcept(s, prefixesToPreserve)
- if err != nil {
- return fmt.Errorf("collect keys except: %w", err)
- }
- return deleteKeys(s, keys)
-}
diff --git a/pkg/statestore/leveldb/migration_test.go b/pkg/statestore/leveldb/migration_test.go
deleted file mode 100644
index 8ae436f0fe3..00000000000
--- a/pkg/statestore/leveldb/migration_test.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// nolint: goheader
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see .
-
-package leveldb
-
-import (
- "errors"
- "fmt"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethersphere/bee/pkg/log"
- "github.com/ethersphere/bee/pkg/storage"
-)
-
-func TestOneMigration(t *testing.T) {
- defer func(v []migration, s string) {
- schemaMigrations = v
- dbSchemaCurrent = s
- }(schemaMigrations, dbSchemaCurrent)
-
- dbSchemaCode := "code"
- dbSchemaCurrent = dbSchemaCode
- dbSchemaNext := "dbSchemaNext"
-
- ran := false
- shouldNotRun := false
- schemaMigrations = []migration{
- {name: dbSchemaCode, fn: func(db *Store) error {
- shouldNotRun = true // this should not be executed
- return nil
- }},
- {name: dbSchemaNext, fn: func(db *Store) error {
- ran = true
- return nil
- }},
- }
-
- dir := t.TempDir()
- logger := log.Noop
-
- // start the fresh statestore with the sanctuary schema name
- db, err := NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- err = db.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- dbSchemaCurrent = dbSchemaNext
-
- // start the existing statestore and expect the migration to run
- db, err = NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- schemaName, err := db.GetSchemaName()
- if err != nil {
- t.Fatal(err)
- }
-
- if schemaName != dbSchemaNext {
- t.Errorf("schema name mismatch. got '%s', want '%s'", schemaName, dbSchemaNext)
- }
-
- if !ran {
- t.Error("expected migration did not run")
- }
-
- if shouldNotRun {
- t.Error("migration ran but shouldnt have")
- }
-
- err = db.Close()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestManyMigrations(t *testing.T) {
- defer func(v []migration, s string) {
- schemaMigrations = v
- dbSchemaCurrent = s
- }(schemaMigrations, dbSchemaCurrent)
-
- dbSchemaCode := "code"
- dbSchemaCurrent = dbSchemaCode
-
- shouldNotRun := false
- executionOrder := []int{-1, -1, -1, -1}
-
- schemaMigrations = []migration{
- {name: dbSchemaCode, fn: func(db *Store) error {
- shouldNotRun = true // this should not be executed
- return nil
- }},
- {name: "keju", fn: func(db *Store) error {
- executionOrder[0] = 0
- return nil
- }},
- {name: "coconut", fn: func(db *Store) error {
- executionOrder[1] = 1
- return nil
- }},
- {name: "mango", fn: func(db *Store) error {
- executionOrder[2] = 2
- return nil
- }},
- {name: "salvation", fn: func(db *Store) error {
- executionOrder[3] = 3
- return nil
- }},
- }
-
- dir := t.TempDir()
- logger := log.Noop
-
- // start the fresh statestore with the sanctuary schema name
- db, err := NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- err = db.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- dbSchemaCurrent = "salvation"
-
- // start the existing statestore and expect the migration to run
- db, err = NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- schemaName, err := db.GetSchemaName()
- if err != nil {
- t.Fatal(err)
- }
-
- if schemaName != "salvation" {
- t.Errorf("schema name mismatch. got '%s', want '%s'", schemaName, "salvation")
- }
-
- if shouldNotRun {
- t.Error("migration ran but shouldnt have")
- }
-
- for i, v := range executionOrder {
- if i != v && i != len(executionOrder)-1 {
- t.Errorf("migration did not run in sequence, slot %d value %d", i, v)
- }
- }
-
- err = db.Close()
- if err != nil {
- t.Error(err)
- }
-}
-
-// TestMigrationErrorFrom checks that local store boot should fail when the schema we're migrating from cannot be found
-func TestMigrationErrorFrom(t *testing.T) {
- defer func(v []migration, s string) {
- schemaMigrations = v
- dbSchemaCurrent = s
- }(schemaMigrations, dbSchemaCurrent)
-
- dbSchemaCurrent = "koo-koo-schema"
-
- shouldNotRun := false
- schemaMigrations = []migration{
- {name: "langur", fn: func(db *Store) error {
- shouldNotRun = true
- return nil
- }},
- {name: "coconut", fn: func(db *Store) error {
- shouldNotRun = true
- return nil
- }},
- {name: "chutney", fn: func(db *Store) error {
- shouldNotRun = true
- return nil
- }},
- }
- dir := t.TempDir()
- logger := log.Noop
-
- // start the fresh statestore with the sanctuary schema name
- db, err := NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- err = db.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- dbSchemaCurrent = "foo"
-
- // start the existing statestore and expect the migration to run
- _, err = NewStateStore(dir, logger)
- if !errors.Is(err, errMissingCurrentSchema) {
- t.Fatalf("expected errCannotFindSchema but got %v", err)
- }
-
- if shouldNotRun {
- t.Error("migration ran but shouldnt have")
- }
-}
-
-// TestMigrationErrorTo checks that local store boot should fail when the schema we're migrating to cannot be found
-func TestMigrationErrorTo(t *testing.T) {
- defer func(v []migration, s string) {
- schemaMigrations = v
- dbSchemaCurrent = s
- }(schemaMigrations, dbSchemaCurrent)
-
- dbSchemaCurrent = "langur"
-
- shouldNotRun := false
- schemaMigrations = []migration{
- {name: "langur", fn: func(db *Store) error {
- shouldNotRun = true
- return nil
- }},
- {name: "coconut", fn: func(db *Store) error {
- shouldNotRun = true
- return nil
- }},
- {name: "chutney", fn: func(db *Store) error {
- shouldNotRun = true
- return nil
- }},
- }
- dir := t.TempDir()
- logger := log.Noop
-
- // start the fresh statestore with the sanctuary schema name
- db, err := NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- err = db.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- dbSchemaCurrent = "foo"
-
- // start the existing statestore and expect the migration to run
- _, err = NewStateStore(dir, logger)
- if !errors.Is(err, errMissingTargetSchema) {
- t.Fatalf("expected errMissingTargetSchema but got %v", err)
- }
-
- if shouldNotRun {
- t.Error("migration ran but shouldnt have")
- }
-}
-
-func TestMigrationSwap(t *testing.T) {
- dir := t.TempDir()
- logger := log.Noop
-
- // start the fresh statestore with the sanctuary schema name
- db, err := NewStateStore(dir, logger)
- if err != nil {
- t.Fatal(err)
- }
- defer db.Close()
-
- address := common.HexToAddress("0xabcd")
- storedAddress := common.HexToAddress("0xffff")
-
- legacyKey1 := fmt.Sprintf("swap_peer_chequebook_%s", address[:])
- legacyKey2 := fmt.Sprintf("swap_beneficiary_peer_%s", address[:])
-
- if err = db.Put(legacyKey1, storedAddress); err != nil {
- t.Fatal(err)
- }
-
- if err = db.Put(legacyKey2, storedAddress); err != nil {
- t.Fatal(err)
- }
-
- if err = migrateSwap(db); err != nil {
- t.Fatal(err)
- }
-
- var retrievedAddress common.Address
- if err = db.Get("swap_peer_chequebook_000000000000000000000000000000000000abcd", &retrievedAddress); err != nil {
- t.Fatal(err)
- }
-
- if retrievedAddress != storedAddress {
- t.Fatalf("got wrong address. wanted %x, got %x", storedAddress, retrievedAddress)
- }
-
- if err = db.Get("swap_beneficiary_peer_000000000000000000000000000000000000abcd", &retrievedAddress); err != nil {
- t.Fatal(err)
- }
-
- if retrievedAddress != storedAddress {
- t.Fatalf("got wrong address. wanted %x, got %x", storedAddress, retrievedAddress)
- }
-
- if err = db.Get(legacyKey1, &retrievedAddress); !errors.Is(err, storage.ErrNotFound) {
- t.Fatalf("legacyKey1 not deleted. got error %v", err)
- }
-
- if err = db.Get(legacyKey2, &retrievedAddress); !errors.Is(err, storage.ErrNotFound) {
- t.Fatalf("legacyKey2 not deleted. got error %v", err)
- }
-}
diff --git a/pkg/statestore/mock/store.go b/pkg/statestore/mock/store.go
index c1150260c2e..964c5c4ef61 100644
--- a/pkg/statestore/mock/store.go
+++ b/pkg/statestore/mock/store.go
@@ -7,7 +7,6 @@ package mock
import (
"encoding"
"encoding/json"
- "fmt"
"strings"
"sync"
@@ -16,8 +15,6 @@ import (
var _ storage.StateStorer = (*store)(nil)
-const mockSchemaNameKey = "schema_name"
-
type store struct {
store map[string][]byte
mtx sync.RWMutex
@@ -28,10 +25,6 @@ func NewStateStore() storage.StateStorer {
store: make(map[string][]byte),
}
- if err := s.Put(mockSchemaNameKey, "mock_schema"); err != nil {
- panic(fmt.Errorf("put schema name: %w", err))
- }
-
return s
}
diff --git a/pkg/statestore/storeadapter/migration.go b/pkg/statestore/storeadapter/migration.go
index 52eceaf4335..171f31ccf57 100644
--- a/pkg/statestore/storeadapter/migration.go
+++ b/pkg/statestore/storeadapter/migration.go
@@ -19,6 +19,7 @@ func allSteps() migration.Steps {
4: deletePrefix("blocklist"),
5: deletePrefix("batchstore"),
6: deletePrefix("sync_interval"),
+ 7: deletePrefix("sync_interval"),
}
}
diff --git a/pkg/statestore/storeadapter/storeadapter_test.go b/pkg/statestore/storeadapter/storeadapter_test.go
index 7364a297ed4..5065a5158da 100644
--- a/pkg/statestore/storeadapter/storeadapter_test.go
+++ b/pkg/statestore/storeadapter/storeadapter_test.go
@@ -30,12 +30,6 @@ func TestStateStoreAdapter(t *testing.T) {
}
})
- // The test requires the state store to have
- // a schema, otherwise the delete test fails.
- if err := store.Put("test_schema", "name"); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
return store
})
diff --git a/pkg/statestore/test/store.go b/pkg/statestore/test/store.go
index 11c7acb9fe5..ee5050b8633 100644
--- a/pkg/statestore/test/store.go
+++ b/pkg/statestore/test/store.go
@@ -225,5 +225,5 @@ func testStoreIterator(t *testing.T, store storage.StateStorer, prefix string, s
func testEmpty(t *testing.T, store storage.StateStorer) {
t.Helper()
- testStoreIterator(t, store, "", 1) // 1 because of the schema entry.
+ testStoreIterator(t, store, "", 0)
}
diff --git a/pkg/storage/inmemchunkstore/inmemchunkstore.go b/pkg/storage/inmemchunkstore/inmemchunkstore.go
index 8f1dcb77a7d..0f225db0608 100644
--- a/pkg/storage/inmemchunkstore/inmemchunkstore.go
+++ b/pkg/storage/inmemchunkstore/inmemchunkstore.go
@@ -14,12 +14,17 @@ import (
type ChunkStore struct {
mu sync.Mutex
- chunks map[string]swarm.Chunk
+ chunks map[string]chunkCount
+}
+
+type chunkCount struct {
+ chunk swarm.Chunk
+ count int
}
func New() *ChunkStore {
return &ChunkStore{
- chunks: make(map[string]swarm.Chunk),
+ chunks: make(map[string]chunkCount),
}
}
@@ -31,18 +36,19 @@ func (c *ChunkStore) Get(_ context.Context, addr swarm.Address) (swarm.Chunk, er
if !ok {
return nil, storage.ErrNotFound
}
- return chunk, nil
+ return chunk.chunk, nil
}
func (c *ChunkStore) Put(_ context.Context, ch swarm.Chunk) error {
c.mu.Lock()
defer c.mu.Unlock()
- chunk, ok := c.chunks[ch.Address().ByteString()]
+ chunkCount, ok := c.chunks[ch.Address().ByteString()]
if !ok {
- chunk = swarm.NewChunk(ch.Address(), ch.Data()).WithStamp(ch.Stamp())
+ chunkCount.chunk = swarm.NewChunk(ch.Address(), ch.Data()).WithStamp(ch.Stamp())
}
- c.chunks[ch.Address().ByteString()] = chunk
+ chunkCount.count++
+ c.chunks[ch.Address().ByteString()] = chunkCount
return nil
}
@@ -60,7 +66,13 @@ func (c *ChunkStore) Delete(_ context.Context, addr swarm.Address) error {
c.mu.Lock()
defer c.mu.Unlock()
- delete(c.chunks, addr.ByteString())
+ chunkCount := c.chunks[addr.ByteString()]
+ chunkCount.count--
+ if chunkCount.count <= 0 {
+ delete(c.chunks, addr.ByteString())
+ } else {
+ c.chunks[addr.ByteString()] = chunkCount
+ }
return nil
}
@@ -69,8 +81,8 @@ func (c *ChunkStore) Iterate(_ context.Context, fn storage.IterateChunkFn) error
c.mu.Lock()
defer c.mu.Unlock()
- for _, chunk := range c.chunks {
- stop, err := fn(chunk)
+ for _, chunkCount := range c.chunks {
+ stop, err := fn(chunkCount.chunk)
if err != nil {
return err
}
diff --git a/pkg/storage/storagetest/chunkstore.go b/pkg/storage/storagetest/chunkstore.go
index dbd3e4d79a3..767c4a2a0c4 100644
--- a/pkg/storage/storagetest/chunkstore.go
+++ b/pkg/storage/storagetest/chunkstore.go
@@ -95,6 +95,15 @@ func TestChunkStore(t *testing.T, st storage.ChunkStore) {
if err != nil {
t.Fatalf("failed deleting chunk: %v", err)
}
+ _, err = st.Get(context.TODO(), ch.Address())
+ if err != nil {
+ t.Fatalf("expected no error, found: %v", err)
+ }
+ // delete twice as it was put twice
+ err = st.Delete(context.TODO(), ch.Address())
+ if err != nil {
+ t.Fatalf("failed deleting chunk: %v", err)
+ }
}
}
})
diff --git a/pkg/storageincentives/proof_test.go b/pkg/storageincentives/proof_test.go
index dcd7002f913..1d83309ba24 100644
--- a/pkg/storageincentives/proof_test.go
+++ b/pkg/storageincentives/proof_test.go
@@ -26,7 +26,7 @@ import (
)
// Test asserts valid case for MakeInclusionProofs.
-func TestMakeInclusionProofs(t *testing.T) {
+func TestMakeInclusionProofs_FLAKY(t *testing.T) {
t.Parallel()
anchor := testutil.RandBytes(t, 1)
@@ -43,7 +43,9 @@ var testData []byte
// Test asserts that MakeInclusionProofs will generate the same
// output for given sample.
-func TestMakeInclusionProofsRegression(t *testing.T) {
+func TestMakeInclusionProofsRegression_FLAKY(t *testing.T) {
+ t.Parallel()
+
const sampleSize = 16
keyRaw := `00000000000000000000000000000000`
diff --git a/pkg/storageincentives/staking/contract.go b/pkg/storageincentives/staking/contract.go
index 014b1a67adc..128343824a2 100644
--- a/pkg/storageincentives/staking/contract.go
+++ b/pkg/storageincentives/staking/contract.go
@@ -23,7 +23,7 @@ import (
var (
MinimumStakeAmount = big.NewInt(100000000000000000)
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_3_1)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
ErrInsufficientStakeAmount = errors.New("insufficient stake amount")
ErrInsufficientFunds = errors.New("insufficient token balance")
diff --git a/pkg/storer/debug.go b/pkg/storer/debug.go
index f78c866c906..9128141a430 100644
--- a/pkg/storer/debug.go
+++ b/pkg/storer/debug.go
@@ -16,8 +16,8 @@ import (
)
type UploadStat struct {
- TotalUploaded int
- TotalSynced int
+ TotalUploaded uint64
+ TotalSynced uint64
}
type PinningStat struct {
@@ -80,28 +80,22 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) {
})
var (
- uploaded int
- synced int
+ uploaded uint64
+ synced uint64
)
eg.Go(func() error {
- return upload.IterateAll(
- db.repo.IndexStore(),
- func(_ swarm.Address, isSynced bool) (bool, error) {
- select {
- case <-ctx.Done():
- return true, ctx.Err()
- case <-db.quit:
- return true, ErrDBQuit
- default:
- }
-
- uploaded++
- if isSynced {
- synced++
- }
- return false, nil
- },
- )
+ return upload.IterateAllTagItems(db.repo.IndexStore(), func(ti *upload.TagItem) (bool, error) {
+ select {
+ case <-ctx.Done():
+ return true, ctx.Err()
+ case <-db.quit:
+ return true, ErrDBQuit
+ default:
+ }
+ uploaded += ti.Split
+ synced += ti.Synced
+ return false, nil
+ })
})
var (
diff --git a/pkg/storer/epoch_migration_test.go b/pkg/storer/epoch_migration_test.go
deleted file mode 100644
index b51451466f2..00000000000
--- a/pkg/storer/epoch_migration_test.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package storer_test
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "fmt"
- "io"
- "io/fs"
- "os"
- "path"
- "path/filepath"
- "strings"
- "sync"
- "testing"
-
- "github.com/ethersphere/bee/pkg/file/splitter"
- "github.com/ethersphere/bee/pkg/log"
- postagetesting "github.com/ethersphere/bee/pkg/postage/testing"
- "github.com/ethersphere/bee/pkg/sharky"
- "github.com/ethersphere/bee/pkg/shed"
- mockstatestore "github.com/ethersphere/bee/pkg/statestore/mock"
- storage "github.com/ethersphere/bee/pkg/storage"
- "github.com/ethersphere/bee/pkg/storage/inmemstore"
- chunktest "github.com/ethersphere/bee/pkg/storage/testing"
- storer "github.com/ethersphere/bee/pkg/storer"
- "github.com/ethersphere/bee/pkg/storer/internal"
- pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning"
- "github.com/ethersphere/bee/pkg/swarm"
-)
-
-type dirFS struct {
- basedir string
-}
-
-func (d *dirFS) Open(path string) (fs.File, error) {
- return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644)
-}
-
-func createOldDataDir(t *testing.T, dataPath string, baseAddress swarm.Address, stateStore storage.StateStorer) {
- t.Helper()
-
- binIDs := map[uint8]int{}
-
- assignBinID := func(addr swarm.Address) int {
- po := swarm.Proximity(baseAddress.Bytes(), addr.Bytes())
- if _, ok := binIDs[po]; !ok {
- binIDs[po] = 1
- return 1
- }
- binIDs[po]++
- return binIDs[po]
- }
-
- err := os.Mkdir(filepath.Join(dataPath, "sharky"), 0777)
- if err != nil {
- t.Fatal(err)
- }
-
- sharkyStore, err := sharky.New(&dirFS{basedir: filepath.Join(dataPath, "sharky")}, 2, swarm.SocMaxChunkSize)
- if err != nil {
- t.Fatal(err)
- }
- defer sharkyStore.Close()
-
- shedDB, err := shed.NewDB(dataPath, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer shedDB.Close()
-
- pIdx, rIdx, err := storer.InitShedIndexes(shedDB, baseAddress)
- if err != nil {
- t.Fatal(err)
- }
-
- reserveChunks := chunktest.GenerateTestRandomChunks(10)
-
- for _, c := range reserveChunks {
- loc, err := sharkyStore.Write(context.Background(), c.Data())
- if err != nil {
- t.Fatal(err)
- }
-
- locBuf, err := loc.MarshalBinary()
- if err != nil {
- t.Fatal(err)
- }
-
- binID := assignBinID(c.Address())
-
- err = pIdx.Put(shed.Item{
- Address: c.Address().Bytes(),
- BinID: uint64(binID),
- BatchID: c.Stamp().BatchID(),
- })
- if err != nil {
- t.Fatal(err)
- }
-
- err = rIdx.Put(shed.Item{
- Address: c.Address().Bytes(),
- BinID: uint64(binID),
- BatchID: c.Stamp().BatchID(),
- Index: c.Stamp().Index(),
- Timestamp: c.Stamp().Timestamp(),
- Sig: c.Stamp().Sig(),
- Location: locBuf,
- })
-
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // create a pinning collection
- writer := splitter.NewSimpleSplitter(
- storage.PutterFunc(
- func(ctx context.Context, chunk swarm.Chunk) error {
- c := chunk.WithStamp(postagetesting.MustNewStamp())
-
- loc, err := sharkyStore.Write(context.Background(), c.Data())
- if err != nil {
- return err
- }
-
- locBuf, err := loc.MarshalBinary()
- if err != nil {
- return err
- }
-
- return rIdx.Put(shed.Item{
- Address: c.Address().Bytes(),
- BatchID: c.Stamp().BatchID(),
- Index: c.Stamp().Index(),
- Timestamp: c.Stamp().Timestamp(),
- Sig: c.Stamp().Sig(),
- Location: locBuf,
- })
- },
- ),
- )
-
- randData := make([]byte, 4096*20)
- _, err = rand.Read(randData)
- if err != nil {
- t.Fatal(err)
- }
-
- root, err := writer.Split(context.Background(), io.NopCloser(bytes.NewBuffer(randData)), 4096*20, false)
- if err != nil {
- t.Fatal(err)
- }
-
- err = stateStore.Put(fmt.Sprintf("root-pin-%s", root.String()), root)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-type testSharkyRecovery struct {
- *sharky.Recovery
- mtx sync.Mutex
- addCalls int
-}
-
-func (t *testSharkyRecovery) Add(loc sharky.Location) error {
- t.mtx.Lock()
- t.addCalls++
- t.mtx.Unlock()
- return t.Recovery.Add(loc)
-}
-
-type testReservePutter struct {
- mtx sync.Mutex
- size int
- calls int
-}
-
-func (t *testReservePutter) Put(ctx context.Context, st internal.Storage, ch swarm.Chunk) (bool, error) {
- t.mtx.Lock()
- t.calls++
- t.mtx.Unlock()
- return true, st.ChunkStore().Put(ctx, ch)
-}
-
-func (t *testReservePutter) AddSize(size int) {
- t.mtx.Lock()
- t.size += size
- t.mtx.Unlock()
-}
-
-func (t *testReservePutter) Size() int {
- t.mtx.Lock()
- defer t.mtx.Unlock()
- return t.size
-}
-
-// TestEpochMigration_FLAKY is flaky on windows.
-func TestEpochMigration_FLAKY(t *testing.T) {
- t.Parallel()
-
- var (
- dataPath = t.TempDir()
- baseAddress = swarm.RandAddress(t)
- stateStore = mockstatestore.NewStateStore()
- reserve = &testReservePutter{}
- logBytes = bytes.NewBuffer(nil)
- logger = log.NewLogger("test", log.WithSink(logBytes))
- indexStore = inmemstore.New()
- )
-
- createOldDataDir(t, dataPath, baseAddress, stateStore)
-
- r, err := sharky.NewRecovery(path.Join(dataPath, "sharky"), 2, swarm.SocMaxChunkSize)
- if err != nil {
- t.Fatal(err)
- }
-
- sharkyRecovery := &testSharkyRecovery{Recovery: r}
-
- err = storer.EpochMigration(
- context.Background(),
- dataPath,
- stateStore,
- indexStore,
- reserve,
- sharkyRecovery,
- logger,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- if !strings.Contains(logBytes.String(), "migrating pinning collections done") {
- t.Fatalf("expected log to contain 'migrating pinning collections done', got %s", logBytes.String())
- }
-
- if !strings.Contains(logBytes.String(), "migrating reserve contents done") {
- t.Fatalf("expected log to contain 'migrating pinning collections done', got %s", logBytes.String())
- }
-
- if sharkyRecovery.addCalls != 31 {
- t.Fatalf("expected 31 add calls, got %d", sharkyRecovery.addCalls)
- }
-
- if reserve.calls != 10 {
- t.Fatalf("expected 10 reserve calls, got %d", reserve.calls)
- }
-
- if reserve.size != 10 {
- t.Fatalf("expected 10 reserve size, got %d", reserve.size)
- }
-
- pins, err := pinstore.Pins(indexStore)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(pins) != 1 {
- t.Fatalf("expected 1 pin, got %d", len(pins))
- }
-
- if !strings.Contains(logBytes.String(), pins[0].String()) {
- t.Fatalf("expected log to contain root pin reference, got %s", logBytes.String())
- }
-}
-
-func TestEpochMigrationLightNode(t *testing.T) {
- t.Parallel()
-
- var (
- dataPath = t.TempDir()
- baseAddress = swarm.RandAddress(t)
- stateStore = mockstatestore.NewStateStore()
- reserve storer.ReservePutter
- logBytes = bytes.NewBuffer(nil)
- logger = log.NewLogger("test", log.WithSink(logBytes))
- indexStore = inmemstore.New()
- )
-
- createOldDataDir(t, dataPath, baseAddress, stateStore)
-
- r, err := sharky.NewRecovery(path.Join(dataPath, "sharky"), 2, swarm.SocMaxChunkSize)
- if err != nil {
- t.Fatal(err)
- }
-
- sharkyRecovery := &testSharkyRecovery{Recovery: r}
-
- err = storer.EpochMigration(
- context.Background(),
- dataPath,
- stateStore,
- indexStore,
- reserve,
- sharkyRecovery,
- logger,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- if !strings.Contains(logBytes.String(), "migrating pinning collections done") {
- t.Fatalf("expected log to contain 'migrating pinning collections done', got %s", logBytes.String())
- }
-
- if strings.Contains(logBytes.String(), "migrating reserve contents done") {
- t.Fatalf("expected log to not contain 'migrating reserve contents done', got %s", logBytes.String())
- }
-
- if sharkyRecovery.addCalls != 21 {
- t.Fatalf("expected 31 add calls, got %d", sharkyRecovery.addCalls)
- }
-
- pins, err := pinstore.Pins(indexStore)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(pins) != 1 {
- t.Fatalf("expected 1 pin, got %d", len(pins))
- }
-
- if !strings.ContainsAny(logBytes.String(), pins[0].String()) {
- t.Fatalf("expected log to contain root pin reference, got %s", logBytes.String())
- }
-}
diff --git a/pkg/storer/export_test.go b/pkg/storer/export_test.go
index 7bdc2198e10..0613e467831 100644
--- a/pkg/storer/export_test.go
+++ b/pkg/storer/export_test.go
@@ -12,15 +12,6 @@ import (
"github.com/ethersphere/bee/pkg/storer/internal/reserve"
)
-var (
- InitShedIndexes = initShedIndexes
- EpochMigration = epochMigration
-)
-
-type (
- ReservePutter = reservePutter
-)
-
func (db *DB) Reserve() *reserve.Reserve {
return db.reserve
}
diff --git a/pkg/storer/internal/cache/cache.go b/pkg/storer/internal/cache/cache.go
index 354219cbea7..887f2a27ae9 100644
--- a/pkg/storer/internal/cache/cache.go
+++ b/pkg/storer/internal/cache/cache.go
@@ -10,12 +10,14 @@ import (
"errors"
"fmt"
"strconv"
+ "sync"
"sync/atomic"
"time"
storage "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storer/internal"
"github.com/ethersphere/bee/pkg/swarm"
+ "resenje.org/multex"
)
var now = time.Now
@@ -37,8 +39,10 @@ var (
// part of the reserve but are potentially useful to store for obtaining bandwidth
// incentives.
type Cache struct {
- size atomic.Int64
- capacity int
+ size atomic.Int64
+ capacity int
+ chunkLock *multex.Multex // protects storage ops at chunk level
+ glock sync.RWMutex // blocks Get and Put ops while shallow copy is running.
}
// New creates a new Cache component with the specified capacity. The store is used
@@ -52,6 +56,7 @@ func New(ctx context.Context, store internal.Storage, capacity uint64) (*Cache,
c := &Cache{capacity: int(capacity)}
c.size.Store(int64(count))
+ c.chunkLock = multex.New()
return c, nil
}
@@ -69,6 +74,11 @@ func (c *Cache) Capacity() uint64 { return uint64(c.capacity) }
func (c *Cache) Putter(store internal.Storage) storage.Putter {
return storage.PutterFunc(func(ctx context.Context, chunk swarm.Chunk) error {
+ c.chunkLock.Lock(chunk.Address().ByteString())
+ defer c.chunkLock.Unlock(chunk.Address().ByteString())
+ c.glock.RLock()
+ defer c.glock.RUnlock()
+
newEntry := &cacheEntry{Address: chunk.Address()}
found, err := store.IndexStore().Has(newEntry)
if err != nil {
@@ -126,6 +136,11 @@ func (c *Cache) Getter(store internal.Storage) storage.Getter {
return nil, err
}
+ c.chunkLock.Lock(address.ByteString())
+ defer c.chunkLock.Unlock(address.ByteString())
+ c.glock.RLock()
+ defer c.glock.RUnlock()
+
// check if there is an entry in Cache. As this is the download path, we do
// a best-effort operation. So in case of any error we return the chunk.
entry := &cacheEntry{Address: address}
@@ -180,41 +195,49 @@ func (c *Cache) ShallowCopy(
addrs ...swarm.Address,
) (err error) {
+ c.glock.Lock()
+ defer c.glock.Unlock()
+
+ entries := make([]*cacheEntry, 0, len(addrs))
+
defer func() {
if err != nil {
- for _, addr := range addrs {
- err = errors.Join(store.ChunkStore().Delete(context.Background(), addr))
+ for _, entry := range entries {
+ err = errors.Join(store.ChunkStore().Delete(context.Background(), entry.Address))
}
}
}()
- //consider only the amount that can fit, the rest should be deleted from the chunkstore.
- if len(addrs) > c.capacity {
- for _, addr := range addrs[:len(addrs)-c.capacity] {
- _ = store.ChunkStore().Delete(ctx, addr)
- }
- addrs = addrs[len(addrs)-c.capacity:]
- }
-
- entriesToAdd := make([]*cacheEntry, 0, len(addrs))
for _, addr := range addrs {
entry := &cacheEntry{Address: addr, AccessTimestamp: now().UnixNano()}
if has, err := store.IndexStore().Has(entry); err == nil && has {
+ // Since the caller has previously referenced the chunk (+1 refCnt), and if the chunk is already referenced
+ // by the cache store (+1 refCnt), then we must decrement the refCnt by one ( -1 refCnt to bring the total to +1).
+ // See https://github.com/ethersphere/bee/issues/4530.
+ _ = store.ChunkStore().Delete(ctx, addr)
continue
}
- entriesToAdd = append(entriesToAdd, entry)
+ entries = append(entries, entry)
}
- if len(entriesToAdd) == 0 {
+ if len(entries) == 0 {
return nil
}
+ //consider only the amount that can fit, the rest should be deleted from the chunkstore.
+ if len(entries) > c.capacity {
+ for _, addr := range entries[:len(entries)-c.capacity] {
+ _ = store.ChunkStore().Delete(ctx, addr.Address)
+ }
+ entries = entries[len(entries)-c.capacity:]
+ }
+
batch, err := store.IndexStore().Batch(ctx)
if err != nil {
return fmt.Errorf("failed creating batch: %w", err)
}
- for _, entry := range entriesToAdd {
+ for _, entry := range entries {
err = batch.Put(entry)
if err != nil {
return fmt.Errorf("failed adding entry %s: %w", entry, err)
@@ -232,19 +255,19 @@ func (c *Cache) ShallowCopy(
return fmt.Errorf("batch commit: %w", err)
}
- c.size.Add(int64(len(entriesToAdd)))
+ c.size.Add(int64(len(entries)))
return nil
}
// RemoveOldest removes the oldest cache entries from the store. The count
// specifies the number of entries to remove.
-func (c *Cache) RemoveOldest(
- ctx context.Context,
- store internal.Storage,
- chStore storage.ChunkStore,
- count uint64,
-) error {
+func (c *Cache) RemoveOldest(ctx context.Context, store internal.Storage, chStore storage.ChunkStore, count uint64) error {
+ return c.removeOldest(ctx, store, store.ChunkStore(), count, 1000)
+}
+
+func (c *Cache) removeOldest(ctx context.Context, store internal.Storage, chStore storage.ChunkStore, count uint64, batchCnt int) error {
+
if count <= 0 {
return nil
}
@@ -273,7 +296,8 @@ func (c *Cache) RemoveOldest(
return fmt.Errorf("failed iterating over cache order index: %w", err)
}
- batchCnt := 1_000
+ c.glock.Lock()
+ defer c.glock.Unlock()
for i := 0; i < len(evictItems); i += batchCnt {
end := i + batchCnt
@@ -309,7 +333,7 @@ func (c *Cache) RemoveOldest(
return err
}
- c.size.Add(-int64(len(evictItems)))
+ c.size.Add(-int64(end - i))
}
return nil
diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go
index 6450dc71198..c331853d352 100644
--- a/pkg/storer/internal/cache/cache_test.go
+++ b/pkg/storer/internal/cache/cache_test.go
@@ -347,6 +347,37 @@ func TestCache(t *testing.T) {
})
}
+func TestRemoveOldest(t *testing.T) {
+ t.Parallel()
+
+ st := newTestStorage(t)
+ c, err := cache.New(context.Background(), st, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ chunks := chunktest.GenerateTestRandomChunks(30)
+
+ for _, ch := range chunks {
+ err = c.Putter(st).Put(context.Background(), ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[29].Address(), 30)
+ verifyCacheOrder(t, c, st.IndexStore(), chunks...)
+
+ err = c.RemoveOldestMaxBatch(context.Background(), st, st.ChunkStore(), 30, 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyCacheState(t, st.IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0)
+
+ verifyChunksDeleted(t, st.ChunkStore(), chunks...)
+}
+
func TestShallowCopy(t *testing.T) {
t.Parallel()
@@ -455,6 +486,48 @@ func TestShallowCopyOverCap(t *testing.T) {
verifyChunksDeleted(t, st.ChunkStore(), chunks[5:10]...)
}
+func TestShallowCopyAlreadyCached(t *testing.T) {
+ t.Parallel()
+
+ st := newTestStorage(t)
+ c, err := cache.New(context.Background(), st, 1000)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ chunks := chunktest.GenerateTestRandomChunks(10)
+ chunksToMove := make([]swarm.Address, 0, 10)
+
+ for _, ch := range chunks {
+ // add the chunks to chunkstore. This simulates the reserve already populating the chunkstore with chunks.
+ err := st.ChunkStore().Put(context.Background(), ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // already cached
+ err = c.Putter(st).Put(context.Background(), ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ chunksToMove = append(chunksToMove, ch.Address())
+ }
+
+ // move new chunks
+ err = c.ShallowCopy(context.Background(), st, chunksToMove...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyChunksExist(t, st.ChunkStore(), chunks...)
+
+ err = c.RemoveOldest(context.Background(), st, st.ChunkStore(), 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyChunksDeleted(t, st.ChunkStore(), chunks...)
+}
+
func verifyCacheState(
t *testing.T,
store storage.Store,
@@ -523,3 +596,21 @@ func verifyChunksDeleted(
}
}
}
+
+func verifyChunksExist(
+ t *testing.T,
+ chStore storage.ChunkStore,
+ chs ...swarm.Chunk,
+) {
+ t.Helper()
+
+ for _, ch := range chs {
+ found, err := chStore.Has(context.TODO(), ch.Address())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !found {
+ t.Fatalf("chunk %s expected to be found but not exists", ch.Address())
+ }
+ }
+}
diff --git a/pkg/storer/internal/cache/export_test.go b/pkg/storer/internal/cache/export_test.go
index 770bdd64690..9a8eda3afa3 100644
--- a/pkg/storer/internal/cache/export_test.go
+++ b/pkg/storer/internal/cache/export_test.go
@@ -5,10 +5,12 @@
package cache
import (
+ "context"
"fmt"
"time"
storage "github.com/ethersphere/bee/pkg/storage"
+ "github.com/ethersphere/bee/pkg/storer/internal"
"github.com/ethersphere/bee/pkg/swarm"
)
@@ -35,6 +37,10 @@ type CacheState struct {
Size uint64
}
+func (c *Cache) RemoveOldestMaxBatch(ctx context.Context, store internal.Storage, chStore storage.ChunkStore, count uint64, batchCnt int) error {
+ return c.removeOldest(ctx, store, chStore, count, batchCnt)
+}
+
func (c *Cache) State(store storage.Store) CacheState {
state := CacheState{}
state.Size = c.Size()
diff --git a/pkg/storer/internal/pinning/export_test.go b/pkg/storer/internal/pinning/export_test.go
index f80bbd3c945..79e5864dfba 100644
--- a/pkg/storer/internal/pinning/export_test.go
+++ b/pkg/storer/internal/pinning/export_test.go
@@ -23,6 +23,7 @@ var (
ErrInvalidPinCollectionItemSize = errInvalidPinCollectionSize
ErrPutterAlreadyClosed = errPutterAlreadyClosed
ErrCollectionRootAddressIsZero = errCollectionRootAddressIsZero
+ ErrDuplicatePinCollection = errDuplicatePinCollection
)
var NewUUID = newUUID
diff --git a/pkg/storer/internal/pinning/pinning.go b/pkg/storer/internal/pinning/pinning.go
index 03c47667180..8fb25ba92ad 100644
--- a/pkg/storer/internal/pinning/pinning.go
+++ b/pkg/storer/internal/pinning/pinning.go
@@ -40,6 +40,8 @@ var (
// errCollectionRootAddressIsZero is returned if the putter is closed with a zero
// swarm.Address. Root reference has to be set.
errCollectionRootAddressIsZero = errors.New("pin store: collection root address is zero")
+ // errDuplicatePinCollection is returned when attempted to pin the same file repeatedly
+ errDuplicatePinCollection = errors.New("pin store: duplicate pin collection")
)
// creates a new UUID and returns it as a byte slice
@@ -264,9 +266,21 @@ func (c *collectionPutter) Close(st internal.Storage, writer storage.Writer, roo
c.mtx.Lock()
defer c.mtx.Unlock()
+ collection := &pinCollectionItem{Addr: root}
+ has, err := st.IndexStore().Has(collection)
+
+ if err != nil {
+ return fmt.Errorf("pin store: check previous root: %w", err)
+ }
+
+ if has {
+ // trigger the Cleanup
+ return errDuplicatePinCollection
+ }
+
// Save the root pin reference.
c.collection.Addr = root
- err := writer.Put(c.collection)
+ err = writer.Put(c.collection)
if err != nil {
return fmt.Errorf("pin store: failed updating collection: %w", err)
}
diff --git a/pkg/storer/internal/pinning/pinning_test.go b/pkg/storer/internal/pinning/pinning_test.go
index 096b69b0018..017b135f6f8 100644
--- a/pkg/storer/internal/pinning/pinning_test.go
+++ b/pkg/storer/internal/pinning/pinning_test.go
@@ -276,6 +276,29 @@ func TestPinStore(t *testing.T) {
}
})
+ t.Run("duplicate collection", func(t *testing.T) {
+ root := chunktest.GenerateTestRandomChunk()
+ putter, err := pinstore.NewCollection(st)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = putter.Put(context.Background(), st, st.IndexStore(), root)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = putter.Close(st, st.IndexStore(), root.Address())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = putter.Close(st, st.IndexStore(), root.Address())
+ if err == nil || !errors.Is(err, pinstore.ErrDuplicatePinCollection) {
+ t.Fatalf("unexpected error during Close, want: %v, got: %v", pinstore.ErrDuplicatePinCollection, err)
+ }
+ })
+
t.Run("zero address close", func(t *testing.T) {
root := chunktest.GenerateTestRandomChunk()
putter, err := pinstore.NewCollection(st)
@@ -292,7 +315,6 @@ func TestPinStore(t *testing.T) {
if !errors.Is(err, pinstore.ErrCollectionRootAddressIsZero) {
t.Fatalf("unexpected error on close, want: %v, got: %v", pinstore.ErrCollectionRootAddressIsZero, err)
}
-
})
}
diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go
index 04f5219715c..47c843e48b1 100644
--- a/pkg/storer/internal/reserve/reserve.go
+++ b/pkg/storer/internal/reserve/reserve.go
@@ -118,17 +118,11 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C
newStampIndex := true
- switch item, loaded, err := stampindex.LoadOrStore(
- indexStore,
- storeBatch,
- reserveNamespace,
- chunk,
- ); {
- case err != nil:
+ item, loaded, err := stampindex.LoadOrStore(indexStore, storeBatch, reserveNamespace, chunk)
+ if err != nil {
return false, fmt.Errorf("load or store stamp index for chunk %v has fail: %w", chunk, err)
- case loaded && item.ChunkIsImmutable:
- return false, fmt.Errorf("batch %s index %s: %w", hex.EncodeToString(chunk.Stamp().BatchID()), hex.EncodeToString(chunk.Stamp().Index()), storage.ErrOverwriteOfImmutableBatch)
- case loaded && !item.ChunkIsImmutable:
+ }
+ if loaded {
prev := binary.BigEndian.Uint64(item.StampTimestamp)
curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp())
if prev >= curr {
diff --git a/pkg/storer/internal/upload/uploadstore.go b/pkg/storer/internal/upload/uploadstore.go
index 1126770176a..81fed2a8bcb 100644
--- a/pkg/storer/internal/upload/uploadstore.go
+++ b/pkg/storer/internal/upload/uploadstore.go
@@ -17,7 +17,6 @@ import (
"github.com/ethersphere/bee/pkg/storage/storageutil"
"github.com/ethersphere/bee/pkg/storer/internal"
"github.com/ethersphere/bee/pkg/storer/internal/chunkstamp"
- "github.com/ethersphere/bee/pkg/storer/internal/stampindex"
"github.com/ethersphere/bee/pkg/swarm"
)
@@ -231,10 +230,17 @@ type uploadItem struct {
TagID uint64
Uploaded int64
Synced int64
+
+ // IdFunc overrides the ID method.
+ // This used to get the ID from the item where the address and batchID were not marshalled.
+ IdFunc func() string
}
// ID implements the storage.Item interface.
func (i uploadItem) ID() string {
+ if i.IdFunc != nil {
+ return i.IdFunc()
+ }
return storageutil.JoinFields(i.Address.ByteString(), string(i.BatchID))
}
@@ -351,10 +357,6 @@ func (i dirtyTagItem) String() string {
return storageutil.JoinFields(i.Namespace(), i.ID())
}
-// stampIndexUploadNamespace represents the
-// namespace name of the stamp index for upload.
-const stampIndexUploadNamespace = "upload"
-
var (
// errPutterAlreadyClosed is returned when trying to Put a new chunk
// after the putter has been closed.
@@ -420,28 +422,6 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora
return nil
}
- switch item, loaded, err := stampindex.LoadOrStore(
- s.IndexStore(),
- writer,
- stampIndexUploadNamespace,
- chunk,
- ); {
- case err != nil:
- return fmt.Errorf("load or store stamp index for chunk %v has fail: %w", chunk, err)
- case loaded && item.ChunkIsImmutable:
- return errOverwriteOfImmutableBatch
- case loaded && !item.ChunkIsImmutable:
- prev := binary.BigEndian.Uint64(item.StampTimestamp)
- curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp())
- if prev > curr {
- return errOverwriteOfNewerBatch
- }
- err = stampindex.Store(writer, stampIndexUploadNamespace, chunk)
- if err != nil {
- return fmt.Errorf("failed updating stamp index: %w", err)
- }
- }
-
u.split++
if err := s.ChunkStore().Put(ctx, chunk); err != nil {
@@ -711,10 +691,9 @@ func Report(
return fmt.Errorf("failed deleting chunk %s: %w", chunk.Address(), err)
}
- ui.Synced = now().UnixNano()
- err = batch.Put(ui)
+ err = batch.Delete(ui)
if err != nil {
- return fmt.Errorf("failed updating uploadItem %s: %w", ui, err)
+ return fmt.Errorf("failed deleting uploadItem %s: %w", ui, err)
}
return batch.Commit()
@@ -848,15 +827,31 @@ func DeleteTag(st storage.Store, tagID uint64) error {
return nil
}
-func IterateAll(st storage.Store, iterateFn func(addr swarm.Address, isSynced bool) (bool, error)) error {
+func IterateAll(st storage.Store, iterateFn func(item storage.Item) (bool, error)) error {
return st.Iterate(
storage.Query{
Factory: func() storage.Item { return new(uploadItem) },
},
func(r storage.Result) (bool, error) {
- address := swarm.NewAddress([]byte(r.ID[:32]))
- synced := r.Entry.(*uploadItem).Synced != 0
- return iterateFn(address, synced)
+ ui := r.Entry.(*uploadItem)
+ ui.IdFunc = func() string {
+ return r.ID
+ }
+ return iterateFn(ui)
+ },
+ )
+}
+
+func IterateAllTagItems(st storage.Store, cb func(ti *TagItem) (bool, error)) error {
+ return st.Iterate(
+ storage.Query{
+ Factory: func() storage.Item {
+ return new(TagItem)
+ },
+ },
+ func(result storage.Result) (bool, error) {
+ ti := result.Entry.(*TagItem)
+ return cb(ti)
},
)
}
diff --git a/pkg/storer/internal/upload/uploadstore_test.go b/pkg/storer/internal/upload/uploadstore_test.go
index 569b8c7999e..a2f7f931913 100644
--- a/pkg/storer/internal/upload/uploadstore_test.go
+++ b/pkg/storer/internal/upload/uploadstore_test.go
@@ -7,7 +7,6 @@ package upload_test
import (
"bytes"
"context"
- "encoding/binary"
"errors"
"fmt"
"math"
@@ -16,7 +15,6 @@ import (
"testing"
"time"
- "github.com/ethersphere/bee/pkg/postage"
storage "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage/storagetest"
chunktest "github.com/ethersphere/bee/pkg/storage/testing"
@@ -528,20 +526,27 @@ func TestChunkPutter(t *testing.T) {
t.Run("iterate all", func(t *testing.T) {
count := 0
- err := upload.IterateAll(ts.IndexStore(), func(addr swarm.Address, synced bool) (bool, error) {
- count++
- if synced {
- t.Fatal("expected synced to be false")
- }
- has, err := ts.ChunkStore().Has(context.Background(), addr)
- if err != nil {
- t.Fatalf("unexpected error in Has(...): %v", err)
- }
- if !has {
- t.Fatalf("expected chunk to be present %s", addr.String())
- }
- return false, nil
- })
+ err := ts.IndexStore().Iterate(
+ storage.Query{
+ Factory: func() storage.Item { return new(upload.UploadItem) },
+ },
+ func(r storage.Result) (bool, error) {
+ address := swarm.NewAddress([]byte(r.ID[:32]))
+ synced := r.Entry.(*upload.UploadItem).Synced != 0
+ count++
+ if synced {
+ t.Fatal("expected synced to be false")
+ }
+ has, err := ts.ChunkStore().Has(context.Background(), address)
+ if err != nil {
+ t.Fatalf("unexpected error in Has(...): %v", err)
+ }
+ if !has {
+ t.Fatalf("expected chunk to be present %s", address.String())
+ }
+ return false, nil
+ },
+ )
if err != nil {
t.Fatalf("IterateAll(...): unexpected error %v", err)
}
@@ -573,6 +578,28 @@ func TestChunkPutter(t *testing.T) {
if diff := cmp.Diff(wantTI, ti); diff != "" {
t.Fatalf("Get(...): unexpected TagItem (-want +have):\n%s", diff)
}
+
+ t.Run("iterate all tag items", func(t *testing.T) {
+ var tagItemsCount, uploaded, synced uint64
+ err := upload.IterateAllTagItems(ts.IndexStore(), func(ti *upload.TagItem) (bool, error) {
+ uploaded += ti.Split
+ synced += ti.Synced
+ tagItemsCount++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatalf("IterateAllTagItems(...): unexpected error %v", err)
+ }
+ if tagItemsCount != 1 {
+ t.Fatalf("unexpected tagItemsCount: want 1 have %d", tagItemsCount)
+ }
+ if uploaded != 20 {
+ t.Fatalf("unexpected uploaded: want 20 have %d", uploaded)
+ }
+ if synced != 0 {
+ t.Fatalf("unexpected synced: want 0 have %d", synced)
+ }
+ })
})
t.Run("error after close", func(t *testing.T) {
@@ -711,20 +738,12 @@ func TestChunkReporter(t *testing.T) {
Address: chunk.Address(),
BatchID: chunk.Stamp().BatchID(),
}
- err = ts.IndexStore().Get(ui)
+ has, err := ts.IndexStore().Has(ui)
if err != nil {
- t.Fatalf("Get(...): unexpected error: %v", err)
- }
- wantUI := &upload.UploadItem{
- Address: chunk.Address(),
- BatchID: chunk.Stamp().BatchID(),
- TagID: tag.TagID,
- Uploaded: now().UnixNano(),
- Synced: now().UnixNano(),
+ t.Fatalf("unexpected error: %v", err)
}
-
- if diff := cmp.Diff(wantUI, ui); diff != "" {
- t.Fatalf("Get(...): unexpected UploadItem (-want +have):\n%s", diff)
+ if has {
+ t.Fatalf("expected to not be found: %s", ui)
}
pi := &upload.PushItem{
@@ -732,7 +751,7 @@ func TestChunkReporter(t *testing.T) {
Address: chunk.Address(),
BatchID: chunk.Stamp().BatchID(),
}
- has, err := ts.IndexStore().Has(pi)
+ has, err = ts.IndexStore().Has(pi)
if err != nil {
t.Fatalf("Has(...): unexpected error: %v", err)
}
@@ -780,93 +799,6 @@ func TestChunkReporter(t *testing.T) {
})
}
-func TestStampIndexHandling(t *testing.T) {
- t.Parallel()
-
- ts := newTestStorage(t)
-
- tag, err := upload.NextTag(ts.IndexStore())
- if err != nil {
- t.Fatalf("failed creating tag: %v", err)
- }
-
- putter, err := upload.NewPutter(ts, tag.TagID)
- if err != nil {
- t.Fatalf("failed creating putter: %v", err)
- }
-
- t.Run("put chunk with immutable batch", func(t *testing.T) {
- chunk := chunktest.GenerateTestRandomChunk()
- chunk = chunk.WithBatch(
- chunk.Radius(),
- chunk.Depth(),
- chunk.BucketDepth(),
- true,
- )
- if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil {
- t.Fatalf("Put(...): unexpected error: %v", err)
- }
-
- chunk2 := chunktest.GenerateTestRandomChunk().WithStamp(chunk.Stamp())
-
- want := upload.ErrOverwriteOfImmutableBatch
- have := putter.Put(context.Background(), ts, ts.IndexStore(), chunk2)
- if !errors.Is(have, want) {
- t.Fatalf("Put(...): unexpected error:\n\twant: %v\n\thave: %v", want, have)
- }
- })
-
- t.Run("put existing index with older batch timestamp", func(t *testing.T) {
- chunk := chunktest.GenerateTestRandomChunk()
- if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil {
- t.Fatalf("Put(...): unexpected error: %v", err)
- }
-
- decTS := binary.BigEndian.Uint64(chunk.Stamp().Timestamp())
- encTS := make([]byte, 8)
- binary.BigEndian.PutUint64(encTS, decTS-1)
-
- stamp := postage.NewStamp(
- chunk.Stamp().BatchID(),
- chunk.Stamp().Index(),
- encTS,
- chunk.Stamp().Sig(),
- )
-
- chunk2 := chunktest.GenerateTestRandomChunk().WithStamp(stamp)
-
- want := upload.ErrOverwriteOfNewerBatch
- have := putter.Put(context.Background(), ts, ts.IndexStore(), chunk2)
- if !errors.Is(have, want) {
- t.Fatalf("Put(...): unexpected error:\n\twant: %v\n\thave: %v", want, have)
- }
- })
-
- t.Run("put existing chunk with newer batch timestamp", func(t *testing.T) {
- chunk := chunktest.GenerateTestRandomChunk()
- if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil {
- t.Fatalf("Put(...): unexpected error: %v", err)
- }
-
- decTS := binary.BigEndian.Uint64(chunk.Stamp().Timestamp())
- encTS := make([]byte, 8)
- binary.BigEndian.PutUint64(encTS, decTS+1)
-
- stamp := postage.NewStamp(
- chunk.Stamp().BatchID(),
- chunk.Stamp().Index(),
- encTS,
- chunk.Stamp().Sig(),
- )
-
- chunk2 := chunktest.GenerateTestRandomChunk().WithStamp(stamp)
-
- if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk2); err != nil {
- t.Fatalf("Put(...): unexpected error: %v", err)
- }
- })
-}
-
func TestNextTagID(t *testing.T) {
t.Parallel()
diff --git a/pkg/storer/migration/all_steps.go b/pkg/storer/migration/all_steps.go
index ef028659f6d..69aa419747f 100644
--- a/pkg/storer/migration/all_steps.go
+++ b/pkg/storer/migration/all_steps.go
@@ -21,6 +21,7 @@ func AfterInitSteps(
2: step_02,
3: step_03(chunkStore, reserve.ChunkType),
4: step_04(sharkyPath, sharkyNoOfShards),
+ 5: step_05,
}
}
diff --git a/pkg/storer/migration/export_test.go b/pkg/storer/migration/export_test.go
index 210d6973607..bfebec5cb42 100644
--- a/pkg/storer/migration/export_test.go
+++ b/pkg/storer/migration/export_test.go
@@ -9,4 +9,5 @@ var (
Step_02 = step_02
Step_03 = step_03
Step_04 = step_04
+ Step_05 = step_05
)
diff --git a/pkg/storer/migration/step_05.go b/pkg/storer/migration/step_05.go
new file mode 100644
index 00000000000..f9aa75b2b00
--- /dev/null
+++ b/pkg/storer/migration/step_05.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migration
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/ethersphere/bee/pkg/log"
+ "github.com/ethersphere/bee/pkg/storage"
+ "github.com/ethersphere/bee/pkg/storer/internal/upload"
+)
+
+// step_05 is a migration step that removes all upload items from the store.
+func step_05(st storage.BatchedStore) error {
+ logger := log.NewLogger("migration-step-05", log.WithSink(os.Stdout))
+ logger.Info("start removing upload items")
+
+ itemC := make(chan storage.Item)
+ errC := make(chan error)
+ go func() {
+ for item := range itemC {
+ err := st.Delete(item)
+ if err != nil {
+ errC <- fmt.Errorf("delete upload item: %w", err)
+ return
+ }
+ }
+ close(errC)
+ }()
+
+ go func() {
+ defer close(itemC)
+ err := upload.IterateAll(st, func(u storage.Item) (bool, error) {
+ itemC <- u
+ return false, nil
+ })
+ if err != nil {
+ errC <- fmt.Errorf("iterate upload items: %w", err)
+ return
+ }
+ }()
+
+ err := <-errC
+ if err != nil {
+ return err
+ }
+
+ logger.Info("finished removing upload items")
+ return nil
+}
diff --git a/pkg/storer/migration/step_05_test.go b/pkg/storer/migration/step_05_test.go
new file mode 100644
index 00000000000..cc7a88214a8
--- /dev/null
+++ b/pkg/storer/migration/step_05_test.go
@@ -0,0 +1,102 @@
+// Copyright 2024 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migration_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ethersphere/bee/pkg/node"
+ "github.com/ethersphere/bee/pkg/postage"
+ "github.com/ethersphere/bee/pkg/storage"
+ chunktest "github.com/ethersphere/bee/pkg/storage/testing"
+ "github.com/ethersphere/bee/pkg/storer"
+ "github.com/ethersphere/bee/pkg/storer/internal"
+ "github.com/ethersphere/bee/pkg/storer/internal/upload"
+ localmigration "github.com/ethersphere/bee/pkg/storer/migration"
+ "github.com/ethersphere/bee/pkg/swarm"
+ kademlia "github.com/ethersphere/bee/pkg/topology/mock"
+ "github.com/ethersphere/bee/pkg/util/testutil"
+)
+
+func Test_Step_05(t *testing.T) {
+ t.Parallel()
+
+ db, err := storer.New(context.Background(), "", &storer.Options{
+ Logger: testutil.NewLogger(t),
+ RadiusSetter: kademlia.NewTopologyDriver(),
+ Batchstore: new(postage.NoOpBatchStore),
+ ReserveCapacity: node.ReserveCapacity,
+ })
+ if err != nil {
+ t.Fatalf("New(...): unexpected error: %v", err)
+ }
+
+ t.Cleanup(func() {
+ err := db.Close()
+ if err != nil {
+ t.Fatalf("Close(): unexpected closing storer: %v", err)
+ }
+ })
+
+ wantCount := func(t *testing.T, st internal.Storage, want int) {
+ t.Helper()
+ count := 0
+ err = upload.IterateAll(st.IndexStore(), func(_ storage.Item) (bool, error) {
+ count++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatalf("iterate upload items: %v", err)
+ }
+ if count != want {
+ t.Fatalf("expected %d upload items, got %d", want, count)
+ }
+ }
+
+ err = db.Execute(context.Background(), func(st internal.Storage) error {
+ tag, err := upload.NextTag(st.IndexStore())
+ if err != nil {
+ t.Fatalf("create tag: %v", err)
+ }
+
+ putter, err := upload.NewPutter(st, tag.TagID)
+ if err != nil {
+ t.Fatalf("create putter: %v", err)
+ }
+ ctx := context.Background()
+ chunks := chunktest.GenerateTestRandomChunks(10)
+ b, err := st.IndexStore().Batch(ctx)
+ if err != nil {
+ t.Fatalf("create batch: %v", err)
+ }
+
+ for _, ch := range chunks {
+ err := putter.Put(ctx, st, b, ch)
+ if err != nil {
+ t.Fatalf("put chunk: %v", err)
+ }
+ }
+ err = putter.Close(st, st.IndexStore(), swarm.RandAddress(t))
+ if err != nil {
+ t.Fatalf("close putter: %v", err)
+ }
+ err = b.Commit()
+ if err != nil {
+ t.Fatalf("commit batch: %v", err)
+ }
+
+ wantCount(t, st, 10)
+ err = localmigration.Step_05(st.IndexStore())
+ if err != nil {
+ t.Fatalf("step 05: %v", err)
+ }
+ wantCount(t, st, 0)
+ return nil
+ })
+ if err != nil {
+ t.Fatalf("execute: %v", err)
+ }
+}
diff --git a/pkg/storer/netstore.go b/pkg/storer/netstore.go
index 21ed95753f3..3d5a0c01057 100644
--- a/pkg/storer/netstore.go
+++ b/pkg/storer/netstore.go
@@ -12,6 +12,8 @@ import (
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
"golang.org/x/sync/errgroup"
)
@@ -25,11 +27,19 @@ func (db *DB) DirectUpload() PutterSession {
Putter: putterWithMetrics{
storage.PutterFunc(func(ctx context.Context, ch swarm.Chunk) error {
db.directUploadLimiter <- struct{}{}
- eg.Go(func() error {
+ eg.Go(func() (err error) {
defer func() { <-db.directUploadLimiter }()
+ span, logger, ctx := db.tracer.FollowSpanFromContext(ctx, "put-direct-upload", db.logger)
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ }
+ span.Finish()
+ }()
+
for {
- op := &pusher.Op{Chunk: ch, Err: make(chan error, 1), Direct: true}
+ op := &pusher.Op{Chunk: ch, Err: make(chan error, 1), Direct: true, Span: span}
select {
case <-ctx.Done():
return ctx.Err()
@@ -47,9 +57,9 @@ func (db *DB) DirectUpload() PutterSession {
return ErrDBQuit
case err := <-op.Err:
if errors.Is(err, pusher.ErrShallowReceipt) {
- db.logger.Debug("direct upload: shallow receipt received, retrying", "chunk", ch.Address())
+ logger.Debug("direct upload: shallow receipt received, retrying", "chunk", ch.Address())
} else if errors.Is(err, topology.ErrNotFound) {
- db.logger.Debug("direct upload: no peers available, retrying", "chunk", ch.Address())
+ logger.Debug("direct upload: no peers available, retrying", "chunk", ch.Address())
} else {
return err
}
@@ -62,7 +72,7 @@ func (db *DB) DirectUpload() PutterSession {
db.metrics,
"netstore",
},
- done: func(_ swarm.Address) error { return eg.Wait() },
+ done: func(swarm.Address) error { return eg.Wait() },
cleanup: func() error { _ = eg.Wait(); return nil },
}
}
@@ -70,12 +80,25 @@ func (db *DB) DirectUpload() PutterSession {
// Download is the implementation of the NetStore.Download method.
func (db *DB) Download(cache bool) storage.Getter {
return getterWithMetrics{
- storage.GetterFunc(func(ctx context.Context, address swarm.Address) (swarm.Chunk, error) {
- ch, err := db.Lookup().Get(ctx, address)
+ storage.GetterFunc(func(ctx context.Context, address swarm.Address) (ch swarm.Chunk, err error) {
+
+ span, logger, ctx := db.tracer.StartSpanFromContext(ctx, "get-chunk", db.logger)
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ span.LogFields(olog.Bool("success", true))
+ }
+ span.Finish()
+ }()
+
+ ch, err = db.Lookup().Get(ctx, address)
switch {
case err == nil:
+ span.LogFields(olog.String("step", "chunk found locally"))
return ch, nil
case errors.Is(err, storage.ErrNotFound):
+ span.LogFields(olog.String("step", "retrieve chunk from network"))
if db.retrieval != nil {
// if chunk is not found locally, retrieve it from the network
ch, err = db.retrieval.RetrieveChunk(ctx, address, swarm.ZeroAddress)
@@ -93,7 +116,7 @@ func (db *DB) Download(cache bool) storage.Getter {
err := db.Cache().Put(db.cacheLimiter.ctx, ch)
if err != nil {
- db.logger.Debug("putting chunk to cache failed", "error", err, "chunk_address", ch.Address())
+ logger.Debug("putting chunk to cache failed", "error", err, "chunk_address", ch.Address())
}
}()
}
diff --git a/pkg/storer/pinstore_test.go b/pkg/storer/pinstore_test.go
index 676a7bd6260..43676f6ce61 100644
--- a/pkg/storer/pinstore_test.go
+++ b/pkg/storer/pinstore_test.go
@@ -127,6 +127,51 @@ func testPinStore(t *testing.T, newStorer func() (*storer.DB, error)) {
verifyPinCollection(t, lstore.Repo(), testCases[0].chunks[0], testCases[0].chunks, true)
})
})
+
+ t.Run("duplicate parallel upload does not leave orphaned chunks", func(t *testing.T) {
+ chunks := chunktesting.GenerateTestRandomChunks(4)
+
+ session1, err := lstore.NewCollection(context.TODO())
+ if err != nil {
+ t.Fatalf("NewCollection(...): unexpected error: %v", err)
+ }
+
+ session2, err := lstore.NewCollection(context.TODO())
+ if err != nil {
+ t.Fatalf("NewCollection2(...): unexpected error: %v", err)
+ }
+
+ for _, ch := range chunks {
+ err := session2.Put(context.TODO(), ch)
+ if err != nil {
+ t.Fatalf("session2.Put(...): unexpected error: %v", err)
+
+ }
+
+ err = session1.Put(context.TODO(), ch)
+ if err != nil {
+ t.Fatalf("session1.Put(...): unexpected error: %v", err)
+
+ }
+ }
+
+ err = session1.Done(chunks[0].Address())
+ if err != nil {
+ t.Fatalf("session1.Done(...): unexpected error: %v", err)
+ }
+
+ err = session2.Done(chunks[0].Address())
+ if err == nil {
+ t.Fatalf("session2.Done(...): expected error, got nil")
+ }
+
+ if err := session2.Cleanup(); err != nil {
+ t.Fatalf("session2.Cleanup(...): unexpected error: %v", err)
+ }
+
+ verifyPinCollection(t, lstore.Repo(), chunks[0], chunks, true)
+ verifyChunkRefCount(t, lstore.Repo(), chunks)
+ })
}
func TestPinStore(t *testing.T) {
diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go
index 49ec3740738..3ebb3a025d7 100644
--- a/pkg/storer/storer.go
+++ b/pkg/storer/storer.go
@@ -37,6 +37,7 @@ import (
localmigration "github.com/ethersphere/bee/pkg/storer/migration"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
+ "github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/bee/pkg/util/syncutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/afero"
@@ -381,57 +382,6 @@ func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*
return c, commit()
}
-type noopRadiusSetter struct{}
-
-func (noopRadiusSetter) SetStorageRadius(uint8) {}
-
-func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) {
- store, err := initStore(basePath, opts)
- if err != nil {
- return err
- }
- defer store.Close()
-
- sharkyBasePath := path.Join(basePath, sharkyPath)
- var sharkyRecover *sharky.Recovery
- // if this is a fresh node then perform an empty epoch migration
- if _, err := os.Stat(sharkyBasePath); err == nil {
- sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize)
- if err != nil {
- return err
- }
- defer sharkyRecover.Close()
- }
-
- logger := opts.Logger.WithName("epochmigration").Register()
-
- var rs reservePutter
-
- if opts.ReserveCapacity > 0 {
- rs, err = reserve.New(
- opts.Address,
- store,
- opts.ReserveCapacity,
- noopRadiusSetter{},
- logger,
- func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error {
- return nil
- },
- )
- if err != nil {
- return err
- }
- }
-
- defer func() {
- if sharkyRecover != nil {
- retErr = errors.Join(retErr, sharkyRecover.Save())
- }
- }()
-
- return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger)
-}
-
const lockKeyNewSession string = "new_session"
// Options provides a container to configure different things in the storer.
@@ -444,6 +394,7 @@ type Options struct {
LdbDisableSeeksCompaction bool
CacheCapacity uint64
Logger log.Logger
+ Tracer *tracing.Tracer
Address swarm.Address
WarmupDuration time.Duration
@@ -480,7 +431,9 @@ type cacheLimiter struct {
// DB implements all the component stores described above.
type DB struct {
- logger log.Logger
+ logger log.Logger
+ tracer *tracing.Tracer
+
metrics metrics
repo storage.Repository
@@ -544,14 +497,6 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
return nil, err
}
} else {
- // only perform migration if not done already
- if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil {
- err = performEpochMigration(ctx, dirPath, opts)
- if err != nil {
- return nil, err
- }
- }
-
repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts)
if err != nil {
return nil, err
@@ -582,6 +527,7 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
db := &DB{
metrics: metrics,
logger: logger,
+ tracer: opts.Tracer,
baseAddr: opts.Address,
repo: repo,
lock: lock,
diff --git a/pkg/storer/storer_test.go b/pkg/storer/storer_test.go
index ccc491912b6..ce2d7eb72cf 100644
--- a/pkg/storer/storer_test.go
+++ b/pkg/storer/storer_test.go
@@ -18,6 +18,7 @@ import (
"github.com/ethersphere/bee/pkg/storage/inmemchunkstore"
"github.com/ethersphere/bee/pkg/storage/migration"
"github.com/ethersphere/bee/pkg/storer"
+ cs "github.com/ethersphere/bee/pkg/storer/internal/chunkstore"
pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/pkg/storer/internal/upload"
localmigration "github.com/ethersphere/bee/pkg/storer/migration"
@@ -44,7 +45,26 @@ func verifyChunks(
t.Fatalf("unexpected chunk has state: want %t have %t", has, hasFound)
}
}
+}
+func verifyChunkRefCount(
+ t *testing.T,
+ repo storage.Repository,
+ chunks []swarm.Chunk,
+) {
+ t.Helper()
+
+ for _, ch := range chunks {
+ _ = repo.IndexStore().Iterate(storage.Query{
+ Factory: func() storage.Item { return new(cs.RetrievalIndexItem) },
+ }, func(r storage.Result) (bool, error) {
+ entry := r.Entry.(*cs.RetrievalIndexItem)
+ if entry.Address.Equal(ch.Address()) && entry.RefCnt != 1 {
+ t.Errorf("chunk %s has refCnt=%d", ch.Address(), entry.RefCnt)
+ }
+ return false, nil
+ })
+ }
}
func verifySessionInfo(
diff --git a/pkg/storer/validate.go b/pkg/storer/validate.go
index 5eb5cfb5e45..ccad569455d 100644
--- a/pkg/storer/validate.go
+++ b/pkg/storer/validate.go
@@ -7,16 +7,20 @@ package storer
import (
"context"
"fmt"
+ "os"
"path"
"sync"
"time"
+ "sync/atomic"
+
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/sharky"
"github.com/ethersphere/bee/pkg/soc"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storer/internal/chunkstore"
+ pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/pkg/swarm"
)
@@ -141,3 +145,163 @@ func validateWork(logger log.Logger, store storage.Store, readFn func(context.Co
wg.Wait()
}
+
+// ValidatePinCollectionChunks collects all chunk addresses that are present in a pin collection but
+// are either invalid or missing altogether.
+func ValidatePinCollectionChunks(ctx context.Context, basePath, pin, location string, opts *Options) error {
+ logger := opts.Logger
+
+ store, err := initStore(basePath, opts)
+ if err != nil {
+ return fmt.Errorf("failed creating levelDB index store: %w", err)
+ }
+ defer func() {
+ if err := store.Close(); err != nil {
+ logger.Error(err, "failed closing store")
+ }
+ }()
+
+ fs := &dirFS{basedir: path.Join(basePath, sharkyPath)}
+ sharky, err := sharky.New(fs, sharkyNoOfShards, swarm.SocMaxChunkSize)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := sharky.Close(); err != nil {
+ logger.Error(err, "failed closing sharky")
+ }
+ }()
+
+ logger.Info("performing chunk validation")
+ validatePins(logger, store, pin, location, sharky.Read)
+
+ return nil
+}
+
+func validatePins(logger log.Logger, store storage.Store, pin, location string, readFn func(context.Context, sharky.Location, []byte) error) {
+ var stats struct {
+ total, read, invalid atomic.Int32
+ }
+
+ n := time.Now()
+ defer func() {
+ logger.Info("done", "duration", time.Since(n), "read", stats.read.Load(), "invalid", stats.invalid.Load(), "total", stats.total.Load())
+ }()
+
+ validChunk := func(item *chunkstore.RetrievalIndexItem, buf []byte) bool {
+ stats.total.Add(1)
+
+ if err := readFn(context.Background(), item.Location, buf); err != nil {
+ stats.read.Add(1)
+ return true
+ }
+
+ ch := swarm.NewChunk(item.Address, buf)
+
+ if cac.Valid(ch) {
+ return true
+ }
+
+ if soc.Valid(ch) {
+ return true
+ }
+
+ stats.invalid.Add(1)
+
+ return false
+ }
+
+ var pins []swarm.Address
+
+ if pin != "" {
+ addr, err := swarm.ParseHexAddress(pin)
+ if err != nil {
+ panic(fmt.Sprintf("parse provided pin: %s", err))
+ }
+ pins = append(pins, addr)
+ } else {
+ var err error
+ pins, err = pinstore.Pins(store)
+ if err != nil {
+ logger.Error(err, "get pins")
+ return
+ }
+ }
+
+ logger.Info("got a total number of pins", "size", len(pins))
+
+ var (
+ fileName = "address.csv"
+ fileLoc = "."
+ )
+
+ if location != "" {
+ if path.Ext(location) != "" {
+ fileName = path.Base(location)
+ }
+ fileLoc = path.Dir(location)
+ }
+
+ logger.Info("saving stats to", "location", fileLoc, "name", fileName)
+
+ location = path.Join(fileLoc, fileName)
+
+ f, err := os.OpenFile(location, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ logger.Error(err, "open output file for writing")
+ return
+ }
+
+ defer f.Close()
+
+ if _, err := f.WriteString("invalid\tmissing\ttotal\taddress\n"); err != nil {
+ logger.Error(err, "write title")
+ return
+ }
+
+ for _, pin := range pins {
+ var wg sync.WaitGroup
+ var (
+ total, missing, invalid atomic.Int32
+ )
+
+ iteratateItemsC := make(chan *chunkstore.RetrievalIndexItem)
+
+ for i := 0; i < 8; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ buf := make([]byte, swarm.SocMaxChunkSize)
+ for item := range iteratateItemsC {
+ if !validChunk(item, buf[:item.Location.Length]) {
+ invalid.Add(1)
+ }
+ }
+ }()
+ }
+
+ logger.Info("start iteration", "pin", pin)
+
+ _ = pinstore.IterateCollection(store, pin, func(addr swarm.Address) (bool, error) {
+ total.Add(1)
+ rIdx := &chunkstore.RetrievalIndexItem{Address: addr}
+ if err := store.Get(rIdx); err != nil {
+ missing.Add(1)
+ } else {
+ iteratateItemsC <- rIdx
+ }
+ return false, nil
+ })
+
+ close(iteratateItemsC)
+
+ wg.Wait()
+
+ report := fmt.Sprintf("%d\t%d\t%d\t%s\n", invalid.Load(), missing.Load(), total.Load(), pin)
+
+ if _, err := f.WriteString(report); err != nil {
+ logger.Error(err, "write report line")
+ return
+ }
+ }
+}
diff --git a/pkg/topology/kademlia/kademlia_test.go b/pkg/topology/kademlia/kademlia_test.go
index 8bcf05637e9..48e0c20ecb7 100644
--- a/pkg/topology/kademlia/kademlia_test.go
+++ b/pkg/topology/kademlia/kademlia_test.go
@@ -884,7 +884,7 @@ func TestAddressBookPrune(t *testing.T) {
}
// test pruning addressbook after successive failed connect attempts
-func TestAddressBookQuickPrune(t *testing.T) {
+func TestAddressBookQuickPrune_FLAKY(t *testing.T) {
t.Parallel()
var (
diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go
index 6739e1a03d1..14ccdb9ab43 100644
--- a/pkg/tracing/tracing.go
+++ b/pkg/tracing/tracing.go
@@ -107,6 +107,24 @@ func (t *Tracer) StartSpanFromContext(ctx context.Context, operationName string,
return span, loggerWithTraceID(sc, l), WithContext(ctx, sc)
}
+// FollowSpanFromContext starts a new tracing span that is either a root one or
+// follows an existing one from the provided Context. If logger is provided, a new
+// log Entry will be returned with "traceID" log field.
+func (t *Tracer) FollowSpanFromContext(ctx context.Context, operationName string, l log.Logger, opts ...opentracing.StartSpanOption) (opentracing.Span, log.Logger, context.Context) {
+	if t == nil {
+		t = noopTracer
+	}
+
+	// The root and follower cases differ only in the extra FollowsFrom
+	// option, so the StartSpan call itself is shared.
+	if parentContext := FromContext(ctx); parentContext != nil {
+		opts = append(opts, opentracing.FollowsFrom(parentContext))
+	}
+	span := t.tracer.StartSpan(operationName, opts...)
+	sc := span.Context()
+	return span, loggerWithTraceID(sc, l), WithContext(ctx, sc)
+}
+
// AddContextHeader adds a tracing span context to provided p2p Headers from
// the go context. If the tracing span context is not present in go context,
// ErrContextNotFound is returned.
diff --git a/pkg/transaction/event_test.go b/pkg/transaction/event_test.go
index 5e06317021f..4d6dd47193e 100644
--- a/pkg/transaction/event_test.go
+++ b/pkg/transaction/event_test.go
@@ -17,7 +17,7 @@ import (
)
var (
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_3_1)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
)
type transferEvent struct {
diff --git a/pkg/transaction/transaction_test.go b/pkg/transaction/transaction_test.go
index 62250039c56..0bc8be4f361 100644
--- a/pkg/transaction/transaction_test.go
+++ b/pkg/transaction/transaction_test.go
@@ -935,12 +935,12 @@ func TestTransactionService_UnwrapABIError(t *testing.T) {
txData = common.Hex2Bytes("0xabcdee")
value = big.NewInt(1)
- // This is the ABI of the following contract: https://goerli.etherscan.io/address/0xd29d9e385f19d888557cd609006bb1934cb5d1e2#code
+ // This is the ABI of the following contract: https://sepolia.etherscan.io/address/0xd29d9e385f19d888557cd609006bb1934cb5d1e2#code
contractABI = abiutil.MustParseABI(`[{"inputs":[{"internalType":"uint256","name":"available","type":"uint256"},{"internalType":"uint256","name":"required","type":"uint256"}],"name":"InsufficientBalance","type":"error"},{"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[],"stateMutability":"nonpayable","type":"function"}]`)
rpcAPIErr = &rpcAPIError{
code: 3,
msg: "execution reverted",
- err: "0xcf4791810000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006f", // This is the ABI encoded error form the following failed transaction: https://goerli.etherscan.io/tx/0x74a2577db1c325c41e38977aa1eb32ab03dfa17cc1fa0649e84f3d8c0f0882ee
+ err: "0xcf4791810000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006f", // This is the ABI encoded error from the following failed transaction: https://sepolia.etherscan.io/tx/0x74a2577db1c325c41e38977aa1eb32ab03dfa17cc1fa0649e84f3d8c0f0882ee
}
)