diff --git a/.github/workflows/beekeeper.yml b/.github/workflows/beekeeper.yml index 9fff6e646a8..e8690080146 100644 --- a/.github/workflows/beekeeper.yml +++ b/.github/workflows/beekeeper.yml @@ -8,11 +8,11 @@ on: - "**" env: - K3S_VERSION: "v1.22.17+k3s1" + K3S_VERSION: "v1.30.3+k3s1" REPLICA: 3 RUN_TYPE: "PR RUN" SETUP_CONTRACT_IMAGE: "ethersphere/bee-localchain" - SETUP_CONTRACT_IMAGE_TAG: "0.9.2-rc5" + SETUP_CONTRACT_IMAGE_TAG: "0.9.2-rc6" BEELOCAL_BRANCH: "main" BEEKEEPER_BRANCH: "master" BEEKEEPER_METRICS_ENABLED: false diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 371976f1cb0..128e0bae8c5 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -67,10 +67,10 @@ jobs: if: github.ref != 'refs/heads/master' uses: wagoid/commitlint-github-action@v5 - name: GolangCI-Lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: skip-cache: false - version: v1.54.1 + version: v1.61.0 - name: Whitespace check run: make check-whitespace - name: go mod tidy check diff --git a/.golangci.yml b/.golangci.yml index def3e2a3d8d..4e9d42a0380 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,13 +4,12 @@ linters: enable: - asciicheck - bidichk - # - depguard disable temporary until this issue is resolved: https://github.com/golangci/golangci-lint/issues/3906 + - copyloopvar - dogsled - durationcheck - errcheck - errname - errorlint - - exportloopref - forbidigo - gochecknoinits - goconst @@ -33,6 +32,7 @@ linters: - typecheck - unconvert - unused + # - depguard disable temporary until this issue is resolved: https://github.com/golangci/golangci-lint/issues/3906 linters-settings: govet: diff --git a/CODINGSTYLE.md b/CODINGSTYLE.md index 30d6e5a0d5b..6ef48aa3d03 100644 --- a/CODINGSTYLE.md +++ b/CODINGSTYLE.md @@ -161,11 +161,10 @@ Use the Golang [testing package](https://pkg.go.dev/testing) from the standard l ### Parallel Test Execution -Run tests in parallel where possible but don't forget about variable scope gotchas. +Run tests in parallel where possible. ```go for tc := range tt { - tc := tc // must not forget this t.Run(tc.name, func(t *testing.T) { t.Parallel() //execute diff --git a/Dockerfile b/Dockerfile index e0f0738de6c..c957601a9ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22 AS build +FROM golang:1.23 AS build WORKDIR /src # enable modules caching in separate layer @@ -8,7 +8,7 @@ COPY . 
./ RUN make binary -FROM debian:12.4-slim +FROM debian:12.7-slim ENV DEBIAN_FRONTEND noninteractive diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser index cd1fe90dc0d..943e63eaeee 100644 --- a/Dockerfile.goreleaser +++ b/Dockerfile.goreleaser @@ -1,4 +1,4 @@ -FROM debian:12.4-slim +FROM debian:12.7-slim ENV DEBIAN_FRONTEND noninteractive diff --git a/Dockerfile.scratch b/Dockerfile.scratch index 40b0bec6c94..c0e13abe1e6 100644 --- a/Dockerfile.scratch +++ b/Dockerfile.scratch @@ -1,4 +1,4 @@ -FROM debian:12.4-slim +FROM debian:12.7-slim ENV DEBIAN_FRONTEND noninteractive diff --git a/Makefile b/Makefile index 2c654ea5d7d..90e2a6c781b 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ GO ?= go GOBIN ?= $$($(GO) env GOPATH)/bin GOLANGCI_LINT ?= $(GOBIN)/golangci-lint -GOLANGCI_LINT_VERSION ?= v1.55.0 +GOLANGCI_LINT_VERSION ?= v1.61.0 GOGOPROTOBUF ?= protoc-gen-gogofaster GOGOPROTOBUF_VERSION ?= v1.3.1 BEEKEEPER_INSTALL_DIR ?= $(GOBIN) diff --git a/go.mod b/go.mod index 38a8ce09fbd..bda88034b37 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/ethersphere/bee/v2 -go 1.22 +go 1.23 -toolchain go1.22.0 +toolchain go1.23.0 require ( contrib.go.opencensus.io/exporter/prometheus v0.4.2 @@ -11,7 +11,7 @@ require ( github.com/coreos/go-semver v0.3.0 github.com/ethereum/go-ethereum v1.14.3 github.com/ethersphere/go-price-oracle-abi v0.2.0 - github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc5 + github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc6 github.com/ethersphere/go-sw3-abi v0.6.5 github.com/ethersphere/langos v1.0.0 github.com/go-playground/validator/v10 v10.11.1 diff --git a/go.sum b/go.sum index 1a206f61c2a..b6b34b27d69 100644 --- a/go.sum +++ b/go.sum @@ -236,8 +236,8 @@ github.com/ethereum/go-ethereum v1.14.3 h1:5zvnAqLtnCZrU9uod1JCvHWJbPMURzYFHfc2e github.com/ethereum/go-ethereum v1.14.3/go.mod h1:1STrq471D0BQbCX9He0hUj4bHxX2k6mt5nOQJhDNOJ8= github.com/ethersphere/go-price-oracle-abi v0.2.0 h1:wtIcYLgNZHY4BjYwJCnu93SvJdVAZVvBaKinspyyHvQ= github.com/ethersphere/go-price-oracle-abi v0.2.0/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk= -github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc5 h1:orVNqoeAQTavuKmYSJYJF+nfy4wHBL9ZzS3vV1z2K9o= -github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc5/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= +github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc6 h1:3s6c2w9JrToXdiX9vSGxatmRUBzOe57joZk1Xf4SWHU= +github.com/ethersphere/go-storage-incentives-abi v0.9.2-rc6/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= github.com/ethersphere/go-sw3-abi v0.6.5 h1:M5dcIe1zQYvGpY2K07UNkNU9Obc4U+A1fz68Ho/Q+XE= github.com/ethersphere/go-sw3-abi v0.6.5/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU= github.com/ethersphere/langos v1.0.0 h1:NBtNKzXTTRSue95uOlzPN4py7Aofs0xWPzyj4AI1Vcc= diff --git a/openapi/SwarmCommon.yaml b/openapi/SwarmCommon.yaml index f871006c7e2..dcd54c3853a 100644 --- a/openapi/SwarmCommon.yaml +++ b/openapi/SwarmCommon.yaml @@ -928,6 +928,8 @@ components: type: boolean lastSyncedBlock: type: integer + committedDepth: + type: integer StatusResponse: type: object diff --git a/pkg/accesscontrol/grantee.go b/pkg/accesscontrol/grantee.go index 902aebbf433..a7ae1df32a0 100644 --- a/pkg/accesscontrol/grantee.go +++ b/pkg/accesscontrol/grantee.go @@ -7,11 +7,11 @@ package accesscontrol import ( "context" "crypto/ecdsa" - "crypto/elliptic" "errors" "fmt" "github.com/btcsuite/btcd/btcec/v2" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethersphere/bee/v2/pkg/file" 
"github.com/ethersphere/bee/v2/pkg/swarm" ) @@ -85,7 +85,10 @@ func (g *GranteeListStruct) Add(addList []*ecdsa.PublicKey) error { // Save saves the grantee list to the underlying storage and returns the reference. func (g *GranteeListStruct) Save(ctx context.Context) (swarm.Address, error) { - data := serialize(g.grantees) + data, err := serialize(g.grantees) + if err != nil { + return swarm.ZeroAddress, fmt.Errorf("grantee serialize error: %w", err) + } refBytes, err := g.loadSave.Save(ctx, data) if err != nil { return swarm.ZeroAddress, fmt.Errorf("grantee save error: %w", err) @@ -140,16 +143,16 @@ func NewGranteeListReference(ctx context.Context, ls file.LoadSaver, reference s }, nil } -func serialize(publicKeys []*ecdsa.PublicKey) []byte { +func serialize(publicKeys []*ecdsa.PublicKey) ([]byte, error) { b := make([]byte, 0, len(publicKeys)*publicKeyLen) for _, key := range publicKeys { - b = append(b, serializePublicKey(key)...) + // TODO: check if this is the correct way to serialize the public key + // Is this the only curve we support? + // Should we have switch case for different curves? + pubBytes := crypto.S256().Marshal(key.X, key.Y) + b = append(b, pubBytes...) } - return b -} - -func serializePublicKey(pub *ecdsa.PublicKey) []byte { - return elliptic.Marshal(pub.Curve, pub.X, pub.Y) + return b, nil } func deserialize(data []byte) []*ecdsa.PublicKey { diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index 3021f94405c..bb9ec9ad621 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -387,7 +387,6 @@ func TestParseName(t *testing.T) { s.Mount() s.EnableFullAPI() - tC := tC t.Run(tC.desc, func(t *testing.T) { t.Parallel() @@ -456,7 +455,6 @@ func TestPostageHeaderError(t *testing.T) { ) content := []byte{7: 0} // 8 zeros for _, endpoint := range endpoints { - endpoint := endpoint t.Run(endpoint+": empty batch", func(t *testing.T) { t.Parallel() @@ -541,7 +539,6 @@ func TestOptions(t *testing.T) { expectedMethods: "GET, HEAD", }, } { - tc := tc t.Run(tc.endpoint+" options test", func(t *testing.T) { t.Parallel() @@ -558,8 +555,6 @@ func TestPostageDirectAndDeferred(t *testing.T) { t.Parallel() for _, endpoint := range []string{"bytes", "bzz", "chunks"} { - endpoint := endpoint - if endpoint != "chunks" { t.Run(endpoint+" deferred", func(t *testing.T) { t.Parallel() @@ -715,7 +710,6 @@ func createRedistributionAgentService( tranService, &mockHealth{}, log.Noop, - 0, ) } diff --git a/pkg/api/balances_test.go b/pkg/api/balances_test.go index d5434c3d3c8..fc6f6a25b3a 100644 --- a/pkg/api/balances_test.go +++ b/pkg/api/balances_test.go @@ -210,7 +210,6 @@ func TestConsumedBalances(t *testing.T) { if !equalBalances(got, expected) { t.Errorf("got balances: %v, expected: %v", got, expected) } - } func TestConsumedError(t *testing.T) { @@ -328,7 +327,6 @@ func Test_peerBalanceHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -377,7 +375,6 @@ func Test_compensatedPeerBalanceHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/bytes_test.go b/pkg/api/bytes_test.go index e2acc99a9d6..f03fa8b973e 100644 --- a/pkg/api/bytes_test.go +++ b/pkg/api/bytes_test.go @@ -271,7 +271,6 @@ func TestBytesInvalidStamp(t *testing.T) { jsonhttptest.WithRequestBody(bytes.NewReader(content)), ) }) - } func TestBytesUploadHandlerInvalidInputs(t *testing.T) { @@ -314,7 +313,6 @@ func TestBytesUploadHandlerInvalidInputs(t 
*testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -364,7 +362,6 @@ func TestBytesGetHandlerInvalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/bzz_test.go b/pkg/api/bzz_test.go index 7d1e1b27bfe..246ed106778 100644 --- a/pkg/api/bzz_test.go +++ b/pkg/api/bzz_test.go @@ -210,10 +210,8 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) { }) } for _, rLevel := range []redundancy.Level{1, 2, 3, 4} { - rLevel := rLevel t.Run(fmt.Sprintf("level=%d", rLevel), func(t *testing.T) { for _, encrypt := range []bool{false, true} { - encrypt := encrypt shardCnt := rLevel.GetMaxShards() parityCnt := rLevel.GetParities(shardCnt) if encrypt { @@ -230,7 +228,6 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) { case 3: chunkCnt = shardCnt*shardCnt + 1 } - levels := levels t.Run(fmt.Sprintf("encrypt=%v levels=%d chunks=%d", encrypt, levels, chunkCnt), func(t *testing.T) { if levels > 2 && (encrypt == (rLevel%2 == 1)) { t.Skip("skipping to save time") @@ -619,7 +616,6 @@ func TestBzzFilesRangeRequests(t *testing.T) { } for _, upload := range uploads { - upload := upload t.Run(upload.name, func(t *testing.T) { t.Parallel() @@ -886,7 +882,6 @@ func Test_bzzDownloadHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -934,7 +929,6 @@ func TestInvalidBzzParams(t *testing.T) { jsonhttptest.WithRequestBody(tr), jsonhttptest.WithRequestHeader(api.ContentTypeHeader, api.ContentTypeTar), ) - }) t.Run("batch exists", func(t *testing.T) { @@ -962,7 +956,6 @@ func TestInvalidBzzParams(t *testing.T) { jsonhttptest.WithRequestBody(tr), jsonhttptest.WithRequestHeader(api.ContentTypeHeader, api.ContentTypeTar), ) - }) t.Run("batch not found", func(t *testing.T) { @@ -1057,7 +1050,6 @@ func TestInvalidBzzParams(t *testing.T) { address := "f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b" jsonhttptest.Request(t, client, http.MethodGet, fmt.Sprintf("/bzz/%s/", address), http.StatusNotFound) }) - } // TestDirectUploadBzz tests that the direct upload endpoint give correct error message in dev mode diff --git a/pkg/api/chequebook_test.go b/pkg/api/chequebook_test.go index e0276654d45..86e918a1bad 100644 --- a/pkg/api/chequebook_test.go +++ b/pkg/api/chequebook_test.go @@ -418,7 +418,6 @@ func TestChequebookLastCheques(t *testing.T) { if !LastChequesEqual(got, expected) { t.Fatalf("Got: \n %+v \n\n Expected: \n %+v \n\n", got, expected) } - } func TestChequebookLastChequesPeer(t *testing.T) { @@ -433,7 +432,6 @@ func TestChequebookLastChequesPeer(t *testing.T) { sig := make([]byte, 65) lastSentChequeFunc := func(swarm.Address) (*chequebook.SignedCheque, error) { - sig := make([]byte, 65) lastSentCheque := &chequebook.SignedCheque{ @@ -449,7 +447,6 @@ func TestChequebookLastChequesPeer(t *testing.T) { } lastReceivedChequeFunc := func(swarm.Address) (*chequebook.SignedCheque, error) { - lastReceivedCheque := &chequebook.SignedCheque{ Cheque: chequebook.Cheque{ Beneficiary: beneficiary0, @@ -488,7 +485,6 @@ func TestChequebookLastChequesPeer(t *testing.T) { if !reflect.DeepEqual(got, expected) { t.Fatalf("Got: \n %+v \n\n Expected: \n %+v \n\n", got, expected) } - } func TestChequebookCashout(t *testing.T) { @@ -753,7 +749,6 @@ func Test_chequebookLastPeerHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { 
t.Parallel() @@ -765,7 +760,6 @@ func Test_chequebookLastPeerHandler_invalidInputs(t *testing.T) { } func LastChequesEqual(a, b *api.ChequebookLastChequesResponse) bool { - var state bool for akeys := range a.LastCheques { diff --git a/pkg/api/chunk_test.go b/pkg/api/chunk_test.go index 3bbc0558aa8..2c85fa9d2ea 100644 --- a/pkg/api/chunk_test.go +++ b/pkg/api/chunk_test.go @@ -177,7 +177,6 @@ func TestChunkHandlersInvalidInputs(t *testing.T) { method := http.MethodGet for _, tc := range tests { - tc := tc t.Run(method+" "+tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/cors_test.go b/pkg/api/cors_test.go index 73c3b343414..9a45fd5fade 100644 --- a/pkg/api/cors_test.go +++ b/pkg/api/cors_test.go @@ -79,7 +79,6 @@ func TestCORSHeaders(t *testing.T) { wantCORS: false, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -116,7 +115,6 @@ func TestCORSHeaders(t *testing.T) { } }) } - } // TestCors tests whether CORs work correctly with OPTIONS method @@ -135,7 +133,8 @@ func TestCors(t *testing.T) { { endpoint: "bzz", expectedMethods: "POST", - }, { + }, + { endpoint: "bzz/0101011", expectedMethods: "GET, HEAD", }, @@ -156,7 +155,6 @@ func TestCors(t *testing.T) { expectedMethods: "GET, HEAD", }, } { - tc := tc t.Run(tc.endpoint, func(t *testing.T) { t.Parallel() @@ -212,7 +210,6 @@ func TestCorsStatus(t *testing.T) { allowedMethods: "GET, HEAD", }, } { - tc := tc t.Run(tc.endpoint, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/logger_test.go b/pkg/api/logger_test.go index 1a702164b8a..1e13917535f 100644 --- a/pkg/api/logger_test.go +++ b/pkg/api/logger_test.go @@ -178,7 +178,6 @@ func Test_loggerGetHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -244,7 +243,6 @@ func Test_loggerSetVerbosityHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/peer_test.go b/pkg/api/peer_test.go index fdda7b8edf4..a9386d8d694 100644 --- a/pkg/api/peer_test.go +++ b/pkg/api/peer_test.go @@ -240,7 +240,6 @@ func Test_peerConnectHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -289,7 +288,6 @@ func Test_peerDisconnectHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/pin_test.go b/pkg/api/pin_test.go index a85c34a9594..4af6c20dd10 100644 --- a/pkg/api/pin_test.go +++ b/pkg/api/pin_test.go @@ -136,7 +136,6 @@ func TestPinHandlers(t *testing.T) { rootHash = strings.Trim(header.Get(api.ETagHeader), "\"") checkPinHandlers(t, client, rootHash, false) }) - } func TestPinHandlersInvalidInputs(t *testing.T) { @@ -177,9 +176,7 @@ func TestPinHandlersInvalidInputs(t *testing.T) { }} for _, method := range []string{http.MethodGet, http.MethodPost, http.MethodDelete} { - method := method for _, tc := range tests { - tc := tc t.Run(method+" "+tc.name, func(t *testing.T) { t.Parallel() @@ -194,7 +191,6 @@ func TestPinHandlersInvalidInputs(t *testing.T) { const pinRef = "620fcd78c7ce54da2d1b7cc2274a02e190cbe8fecbc3bd244690ab6517ce8f39" func TestIntegrityHandler(t *testing.T) { - t.Parallel() t.Run("ok", func(t *testing.T) { diff --git a/pkg/api/pingpong_test.go b/pkg/api/pingpong_test.go index 9de99103619..20f8af6a06a 100644 --- a/pkg/api/pingpong_test.go +++ b/pkg/api/pingpong_test.go @@ -113,7 +113,6 @@ func 
Test_pingpongHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/postage_test.go b/pkg/api/postage_test.go index 13e3dda13d6..6ba9c812a69 100644 --- a/pkg/api/postage_test.go +++ b/pkg/api/postage_test.go @@ -366,7 +366,6 @@ func TestPostageGetBuckets(t *testing.T) { jsonhttptest.Request(t, tsNotFound, http.MethodGet, "/stamps/"+batchOkStr+"/buckets", http.StatusNotFound) }) - } func TestReserveState(t *testing.T) { @@ -397,6 +396,7 @@ func TestReserveState(t *testing.T) { ) }) } + func TestChainState(t *testing.T) { t.Parallel() @@ -423,7 +423,6 @@ func TestChainState(t *testing.T) { }), ) }) - } func TestPostageTopUpStamp(t *testing.T) { @@ -683,7 +682,6 @@ func TestPostageDiluteStamp(t *testing.T) { TxHash: txHash.String(), }), ) - }) } @@ -770,8 +768,6 @@ func TestPostageAccessHandler(t *testing.T) { for _, op1 := range success { for _, op2 := range failure { - op1 := op1 - op2 := op2 t.Run(op1.name+"-"+op2.name, func(t *testing.T) { t.Parallel() @@ -914,7 +910,6 @@ func Test_postageGetStampBucketsHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -976,7 +971,6 @@ func Test_postageGetStampHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/pss_test.go b/pkg/api/pss_test.go index 6624e6d8d21..d78f8ea668f 100644 --- a/pkg/api/pss_test.go +++ b/pkg/api/pss_test.go @@ -88,7 +88,6 @@ func TestPssWebsocketSingleHandlerDeregister(t *testing.T) { ) err := cl.SetReadDeadline(time.Now().Add(longTimeout)) - if err != nil { t.Fatal(err) } @@ -435,7 +434,6 @@ func TestPssPostHandlerInvalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -446,10 +444,12 @@ func TestPssPostHandlerInvalidInputs(t *testing.T) { } } -type pssSendFn func(context.Context, pss.Targets, swarm.Chunk) error -type mpss struct { - f pssSendFn -} +type ( + pssSendFn func(context.Context, pss.Targets, swarm.Chunk) error + mpss struct { + f pssSendFn + } +) func newMockPss(f pssSendFn) *mpss { return &mpss{f} diff --git a/pkg/api/router_test.go b/pkg/api/router_test.go index 6f3e89d5eeb..99382427c22 100644 --- a/pkg/api/router_test.go +++ b/pkg/api/router_test.go @@ -407,7 +407,6 @@ func TestEndpointOptions(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/settlements_test.go b/pkg/api/settlements_test.go index f76997e1b8c..ae1f73439f8 100644 --- a/pkg/api/settlements_test.go +++ b/pkg/api/settlements_test.go @@ -78,7 +78,6 @@ func TestSettlements(t *testing.T) { if !equalSettlements(got, expected) { t.Errorf("got settlements: %+v, expected: %+v", got, expected) } - } func TestSettlementsError(t *testing.T) { @@ -208,7 +207,6 @@ func Test_peerSettlementsHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/soc_test.go b/pkg/api/soc_test.go index 2407689ece1..344204f06c6 100644 --- a/pkg/api/soc_test.go +++ b/pkg/api/soc_test.go @@ -90,7 +90,7 @@ func TestSOC(t *testing.T) { // try to fetch the same chunk t.Run("chunks fetch", func(t *testing.T) { - rsrc := fmt.Sprintf("/chunks/" + s.Address().String()) + rsrc := fmt.Sprintf("/chunks/%s", s.Address().String()) resp := request(t, client, http.MethodGet, rsrc, nil, 
http.StatusOK) data, err := io.ReadAll(resp.Body) if err != nil { @@ -141,7 +141,6 @@ func TestSOC(t *testing.T) { }) t.Run("ok batch", func(t *testing.T) { - s := testingsoc.GenerateMockSOC(t, testData) hexbatch := hex.EncodeToString(batchOk) client, _, _, chanStorer := newTestServer(t, testServerOptions{ diff --git a/pkg/api/staking_test.go b/pkg/api/staking_test.go index 8c84c0f8bb5..9b55b039d88 100644 --- a/pkg/api/staking_test.go +++ b/pkg/api/staking_test.go @@ -190,7 +190,6 @@ func Test_stakingDepositHandler_invalidInputs(t *testing.T) { }} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/status.go b/pkg/api/status.go index 30e09e1f166..804cd94aef5 100644 --- a/pkg/api/status.go +++ b/pkg/api/status.go @@ -30,6 +30,7 @@ type statusSnapshotResponse struct { BatchCommitment uint64 `json:"batchCommitment"` IsReachable bool `json:"isReachable"` LastSyncedBlock uint64 `json:"lastSyncedBlock"` + CommittedDepth uint8 `json:"committedDepth"` } type statusResponse struct { @@ -94,6 +95,7 @@ func (s *Service) statusGetHandler(w http.ResponseWriter, _ *http.Request) { BatchCommitment: ss.BatchCommitment, IsReachable: ss.IsReachable, LastSyncedBlock: ss.LastSyncedBlock, + CommittedDepth: uint8(ss.CommittedDepth), }) } @@ -141,6 +143,7 @@ func (s *Service) statusGetPeersHandler(w http.ResponseWriter, r *http.Request) snapshot.BatchCommitment = ss.BatchCommitment snapshot.IsReachable = ss.IsReachable snapshot.LastSyncedBlock = ss.LastSyncedBlock + snapshot.CommittedDepth = uint8(ss.CommittedDepth) } mu.Lock() @@ -194,7 +197,7 @@ func (s *Service) statusGetNeighborhoods(w http.ResponseWriter, r *http.Request) neighborhoods = append(neighborhoods, statusNeighborhoodResponse{ Neighborhood: n.Neighborhood.String(), ReserveSizeWithinRadius: n.ReserveSizeWithinRadius, - Proximity: swarm.Proximity(s.overlay.Bytes(), n.Neighborhood.Bytes()), + Proximity: n.Proximity, }) } diff --git a/pkg/api/status_test.go b/pkg/api/status_test.go index 654e94708a8..0ef4dc6c95c 100644 --- a/pkg/api/status_test.go +++ b/pkg/api/status_test.go @@ -40,6 +40,7 @@ func TestGetStatus(t *testing.T) { BatchCommitment: 1, IsReachable: true, LastSyncedBlock: 6092500, + CommittedDepth: 1, } ssMock := &statusSnapshotMock{ @@ -49,6 +50,7 @@ func TestGetStatus(t *testing.T) { storageRadius: ssr.StorageRadius, commitment: ssr.BatchCommitment, chainState: &postage.ChainState{Block: ssr.LastSyncedBlock}, + committedDepth: ssr.CommittedDepth, } statusSvc := status.NewService( @@ -122,6 +124,7 @@ type statusSnapshotMock struct { commitment uint64 chainState *postage.ChainState neighborhoods []*storer.NeighborhoodStat + committedDepth uint8 } func (m *statusSnapshotMock) SyncRate() float64 { return m.syncRate } @@ -135,3 +138,4 @@ func (m *statusSnapshotMock) ReserveSizeWithinRadius() uint64 { func (m *statusSnapshotMock) NeighborhoodsStat(ctx context.Context) ([]*storer.NeighborhoodStat, error) { return m.neighborhoods, nil } +func (m *statusSnapshotMock) CommittedDepth() uint8 { return m.committedDepth } diff --git a/pkg/api/stewardship_test.go b/pkg/api/stewardship_test.go index d10d9366126..3f5b16e7c2a 100644 --- a/pkg/api/stewardship_test.go +++ b/pkg/api/stewardship_test.go @@ -100,9 +100,7 @@ func TestStewardshipInvalidInputs(t *testing.T) { }} for _, method := range []string{http.MethodGet, http.MethodPut} { - method := method for _, tc := range tests { - tc := tc t.Run(method+" "+tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/subdomain_test.go 
b/pkg/api/subdomain_test.go index 6f8efc0b0f2..c1f7fbed142 100644 --- a/pkg/api/subdomain_test.go +++ b/pkg/api/subdomain_test.go @@ -88,7 +88,6 @@ func TestSubdomains(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/tag_test.go b/pkg/api/tag_test.go index d3b8edeb4dc..3a7697ddae8 100644 --- a/pkg/api/tag_test.go +++ b/pkg/api/tag_test.go @@ -25,7 +25,6 @@ func tagsWithIdResource(id uint64) string { return fmt.Sprintf("/tags/%d", id) } // nolint:paralleltest func TestTags(t *testing.T) { - var ( tagsResource = "/tags" storerMock = mockstorer.New() @@ -222,9 +221,7 @@ func TestTagsHandlersInvalidInputs(t *testing.T) { }} for _, method := range []string{http.MethodGet, http.MethodDelete, http.MethodPatch} { - method := method for _, tc := range tests { - tc := tc t.Run(method+" "+tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/api/util_test.go b/pkg/api/util_test.go index 6aa55fbf891..4628ce31270 100644 --- a/pkg/api/util_test.go +++ b/pkg/api/util_test.go @@ -499,8 +499,6 @@ func TestMapStructure(t *testing.T) { want: &mapSwarmAddressTest{SwarmAddressVal: swarm.MustParseHexAddress("1234567890abcdef")}, }} for _, tc := range tests { - tc := tc - t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/bmt/benchmark_test.go b/pkg/bmt/benchmark_test.go index 2f49ce37179..acf0ab4fc83 100644 --- a/pkg/bmt/benchmark_test.go +++ b/pkg/bmt/benchmark_test.go @@ -60,7 +60,7 @@ func benchmarkSHA3(b *testing.B, n int) { // doing it on n testPoolSize each reusing the base hasher // the premise is that this is the minimum computation needed for a BMT // therefore this serves as a theoretical optimum for concurrent implementations -func benchmarkBMTBaseline(b *testing.B, n int) { +func benchmarkBMTBaseline(b *testing.B, _ int) { b.Helper() testData := testutil.RandBytesWithSeed(b, 4096, seed) diff --git a/pkg/bmt/bmt.go b/pkg/bmt/bmt.go index cb19ffb1c39..28d65403bdc 100644 --- a/pkg/bmt/bmt.go +++ b/pkg/bmt/bmt.go @@ -119,9 +119,9 @@ func (h *Hasher) Sum(b []byte) []byte { // with every full segment calls processSection in a go routine. 
func (h *Hasher) Write(b []byte) (int, error) { l := len(b) - max := h.maxSize - h.size - if l > max { - l = max + maxVal := h.maxSize - h.size + if l > maxVal { + l = maxVal } copy(h.bmt.buffer[h.size:], b) secsize := 2 * h.segmentSize @@ -129,7 +129,7 @@ func (h *Hasher) Write(b []byte) (int, error) { h.offset = h.size % secsize h.size += l to := h.size / secsize - if l == max { + if l == maxVal { to-- } h.pos = to diff --git a/pkg/bmt/bmt_test.go b/pkg/bmt/bmt_test.go index 9bb5589eb6a..0eefe9fdb5d 100644 --- a/pkg/bmt/bmt_test.go +++ b/pkg/bmt/bmt_test.go @@ -60,7 +60,6 @@ func TestHasherEmptyData(t *testing.T) { t.Parallel() for _, count := range testSegmentCounts { - count := count t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { t.Parallel() @@ -88,14 +87,13 @@ func TestSyncHasherCorrectness(t *testing.T) { testData := testutil.RandBytesWithSeed(t, 4096, seed) for _, count := range testSegmentCounts { - count := count t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) { t.Parallel() - max := count * hashSize + maxValue := count * hashSize var incr int capacity := 1 pool := bmt.NewPool(bmt.NewConf(swarm.NewHasher, count, capacity)) - for n := 0; n <= max; n += incr { + for n := 0; n <= maxValue; n += incr { h := pool.Get() incr = 1 + rand.Intn(5) err := testHasherCorrectness(h, testData, n, count) @@ -177,8 +175,6 @@ func TestBMTWriterBuffers(t *testing.T) { t.Parallel() for i, count := range testSegmentCounts { - i, count := i, count - t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { t.Parallel() diff --git a/pkg/bmt/proof_test.go b/pkg/bmt/proof_test.go index d9b4ae19438..ba1c3e7220c 100644 --- a/pkg/bmt/proof_test.go +++ b/pkg/bmt/proof_test.go @@ -44,7 +44,6 @@ func TestProofCorrectness(t *testing.T) { t.Fatal("incorrect segment in proof") } } - } pool := bmt.NewPool(bmt.NewConf(swarm.NewHasher, 128, 128)) @@ -211,7 +210,6 @@ func TestProof(t *testing.T) { } for i := 0; i < 128; i++ { - i := i t.Run(fmt.Sprintf("segmentIndex %d", i), func(t *testing.T) { t.Parallel() diff --git a/pkg/bmt/reference/reference_test.go b/pkg/bmt/reference/reference_test.go index f11954d522c..50af9475921 100644 --- a/pkg/bmt/reference/reference_test.go +++ b/pkg/bmt/reference/reference_test.go @@ -106,8 +106,6 @@ func TestRefHasher(t *testing.T) { } { for segCount := x.from; segCount <= x.to; segCount++ { for length := 1; length <= segCount*32; length++ { - length, segCount, x := length, segCount, x - t.Run(fmt.Sprintf("%d_segments_%d_bytes", segCount, length), func(t *testing.T) { t.Parallel() diff --git a/pkg/cac/cac_test.go b/pkg/cac/cac_test.go index 2f1384aa2a0..5008db922df 100644 --- a/pkg/cac/cac_test.go +++ b/pkg/cac/cac_test.go @@ -89,7 +89,6 @@ func TestChunkInvariantsNew(t *testing.T) { wantErr: nil, }, } { - cc := cc t.Run(cc.name, func(t *testing.T) { t.Parallel() @@ -135,7 +134,6 @@ func TestChunkInvariantsNewWithDataSpan(t *testing.T) { wantErr: nil, }, } { - cc := cc t.Run(cc.name, func(t *testing.T) { t.Parallel() @@ -219,7 +217,6 @@ func TestInvalid(t *testing.T) { ), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/crypto/crypto.go b/pkg/crypto/crypto.go index d10c14a9991..77fbeae4a56 100644 --- a/pkg/crypto/crypto.go +++ b/pkg/crypto/crypto.go @@ -14,6 +14,7 @@ import ( "fmt" "github.com/btcsuite/btcd/btcec/v2" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethersphere/bee/v2/pkg/swarm" "golang.org/x/crypto/sha3" ) @@ -29,7 +30,6 @@ const ( // NewOverlayAddress constructs a Swarm Address from ECDSA public 
key. func NewOverlayAddress(p ecdsa.PublicKey, networkID uint64, nonce []byte) (swarm.Address, error) { - ethAddr, err := NewEthereumAddress(p) if err != nil { return swarm.ZeroAddress, err @@ -44,7 +44,6 @@ func NewOverlayAddress(p ecdsa.PublicKey, networkID uint64, nonce []byte) (swarm // NewOverlayFromEthereumAddress constructs a Swarm Address for an Ethereum address. func NewOverlayFromEthereumAddress(ethAddr []byte, networkID uint64, nonce []byte) (swarm.Address, error) { - netIDBytes := make([]byte, 8) binary.LittleEndian.PutUint64(netIDBytes, networkID) @@ -116,7 +115,7 @@ func NewEthereumAddress(p ecdsa.PublicKey) ([]byte, error) { if p.X == nil || p.Y == nil { return nil, errors.New("invalid public key") } - pubBytes := elliptic.Marshal(btcec.S256(), p.X, p.Y) + pubBytes := crypto.S256().Marshal(p.X, p.Y) pubHash, err := LegacyKeccak256(pubBytes[1:]) if err != nil { return nil, err diff --git a/pkg/encryption/mock/mock_test.go b/pkg/encryption/mock/mock_test.go index 97e52b7777c..3dce0055802 100644 --- a/pkg/encryption/mock/mock_test.go +++ b/pkg/encryption/mock/mock_test.go @@ -62,7 +62,6 @@ func TestEncryptor_Encrypt(t *testing.T) { wantErr: mock.ErrInvalidXORKey, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -125,7 +124,6 @@ func TestEncryptor_Decrypt(t *testing.T) { wantErr: mock.ErrInvalidXORKey, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/feeds/sequence/sequence.go b/pkg/feeds/sequence/sequence.go index 5184885f1ab..f1f254309ca 100644 --- a/pkg/feeds/sequence/sequence.go +++ b/pkg/feeds/sequence/sequence.go @@ -195,10 +195,10 @@ func (f *asyncFinder) At(ctx context.Context, at int64, after uint64) (ch swarm. } // at launches concurrent lookups at exponential intervals after the starting from further -func (f *asyncFinder) at(ctx context.Context, at int64, min int, i *interval, c chan<- *result, quit <-chan struct{}) { +func (f *asyncFinder) at(ctx context.Context, at int64, minValue int, i *interval, c chan<- *result, quit <-chan struct{}) { var wg sync.WaitGroup - for l := i.level; l > min; l-- { + for l := i.level; l > minValue; l-- { select { case <-quit: // if the parent process quit return diff --git a/pkg/feeds/testing/lookup.go b/pkg/feeds/testing/lookup.go index 8fd852888c8..21656be5b64 100644 --- a/pkg/feeds/testing/lookup.go +++ b/pkg/feeds/testing/lookup.go @@ -116,7 +116,6 @@ func TestFinderFixIntervals(t *testing.T, nextf func() (bool, int64), finderf fu } func TestFinderIntervals(t *testing.T, nextf func() (bool, int64), finderf func(storage.Getter, *feeds.Feed) feeds.Lookup, updaterf func(putter storage.Putter, signer crypto.Signer, topic []byte) (feeds.Updater, error)) { - storer := &Timeout{inmemchunkstore.New()} topicStr := "testtopic" topic, err := crypto.LegacyKeccak256([]byte(topicStr)) @@ -188,7 +187,6 @@ func TestFinderRandomIntervals(t *testing.T, finderf func(storage.Getter, *feeds t.Parallel() for j := 0; j < 3; j++ { - j := j t.Run(fmt.Sprintf("random intervals %d", j), func(t *testing.T) { t.Parallel() diff --git a/pkg/file/buffer_test.go b/pkg/file/buffer_test.go index 3416db1084a..9fdf66266b6 100644 --- a/pkg/file/buffer_test.go +++ b/pkg/file/buffer_test.go @@ -34,7 +34,6 @@ func TestChunkPipe(t *testing.T) { {swarm.ChunkSize, swarm.ChunkSize}, // on, on } for i, tc := range dataWrites { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() @@ -130,7 +129,6 @@ func TestCopyBuffer(t *testing.T) { } for _, tc := range testCases { - tc := tc 
t.Run(fmt.Sprintf("buf_%-4d/data_size_%d", tc.readBufferSize, tc.dataSize), func(t *testing.T) { t.Parallel() @@ -197,7 +195,7 @@ func reader(t *testing.T, bufferSize int, r io.Reader, c chan<- readResult) { defer close(c) - var buf = make([]byte, bufferSize) + buf := make([]byte, bufferSize) for { n, err := r.Read(buf) if errors.Is(err, io.EOF) { diff --git a/pkg/file/joiner/joiner_test.go b/pkg/file/joiner/joiner_test.go index 6d3ecd9f241..ce00ac8d782 100644 --- a/pkg/file/joiner/joiner_test.go +++ b/pkg/file/joiner/joiner_test.go @@ -230,7 +230,7 @@ func TestJoinerMalformed(t *testing.T) { func TestEncryptDecrypt(t *testing.T) { t.Parallel() - var tests = []struct { + tests := []struct { chunkLength int }{ {10}, @@ -243,7 +243,6 @@ func TestEncryptDecrypt(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(fmt.Sprintf("Encrypt %d bytes", tt.chunkLength), func(t *testing.T) { t.Parallel() @@ -333,7 +332,6 @@ func TestSeek(t *testing.T) { size: 2*swarm.ChunkSize*swarm.ChunkSize + 1000, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -611,7 +609,6 @@ func TestPrefetch(t *testing.T) { expRead: 100000, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -1074,7 +1071,6 @@ func TestJoinerRedundancy(t *testing.T) { true, }, } { - tc := tc t.Run(fmt.Sprintf("redundancy=%d encryption=%t", tc.rLevel, tc.encryptChunk), func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1339,12 +1335,10 @@ func TestJoinerRedundancyMultilevel(t *testing.T) { r2level := []int{2, 1, 2, 3, 2} encryptChunk := []bool{false, false, true, true, true} for _, rLevel := range []redundancy.Level{0, 1, 2, 3, 4} { - rLevel := rLevel // speeding up tests by skipping some of them t.Run(fmt.Sprintf("rLevel=%v", rLevel), func(t *testing.T) { t.Parallel() for _, encrypt := range []bool{false, true} { - encrypt := encrypt shardCnt := rLevel.GetMaxShards() if encrypt { shardCnt = rLevel.GetMaxEncShards() @@ -1416,7 +1410,6 @@ func (c *chunkStore) Replace(_ context.Context, ch swarm.Chunk) error { defer c.mu.Unlock() c.chunks[ch.Address().ByteString()] = swarm.NewChunk(ch.Address(), ch.Data()).WithStamp(ch.Stamp()) return nil - } func (c *chunkStore) Has(_ context.Context, addr swarm.Address) (bool, error) { diff --git a/pkg/file/pipeline/bmt/bmt_test.go b/pkg/file/pipeline/bmt/bmt_test.go index 109a9cc9d42..7fba1d763d0 100644 --- a/pkg/file/pipeline/bmt/bmt_test.go +++ b/pkg/file/pipeline/bmt/bmt_test.go @@ -46,7 +46,6 @@ func TestBmtWriter(t *testing.T) { expErr: bmt.ErrInvalidData, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/file/pipeline/builder/builder_test.go b/pkg/file/pipeline/builder/builder_test.go index 9f8070b1510..17091bcc0d7 100644 --- a/pkg/file/pipeline/builder/builder_test.go +++ b/pkg/file/pipeline/builder/builder_test.go @@ -87,7 +87,6 @@ func TestAllVectors(t *testing.T) { for i := 1; i <= 20; i++ { data, expect := test.GetVector(t, i) - i := i t.Run(fmt.Sprintf("data length %d, vector %d", len(data), i), func(t *testing.T) { t.Parallel() diff --git a/pkg/file/pipeline/feeder/feeder_test.go b/pkg/file/pipeline/feeder/feeder_test.go index 51c56dd6a3b..6632b854461 100644 --- a/pkg/file/pipeline/feeder/feeder_test.go +++ b/pkg/file/pipeline/feeder/feeder_test.go @@ -75,7 +75,6 @@ func TestFeeder(t *testing.T) { span: 5, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() var results pipeline.PipeWriteArgs @@ -179,7 +178,6 @@ func TestFeederFlush(t *testing.T) { span: 3, }, 
} { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/file/pipeline/hashtrie/hashtrie_test.go b/pkg/file/pipeline/hashtrie/hashtrie_test.go index 7d2820a0ae0..b36fede3556 100644 --- a/pkg/file/pipeline/hashtrie/hashtrie_test.go +++ b/pkg/file/pipeline/hashtrie/hashtrie_test.go @@ -84,9 +84,7 @@ func newErasureHashTrieWriter( func TestLevels(t *testing.T) { t.Parallel() - var ( - hashSize = 32 - ) + hashSize := 32 // to create a level wrap we need to do branching^(level-1) writes for _, tc := range []struct { @@ -134,8 +132,6 @@ func TestLevels(t *testing.T) { writes: 16384, }, } { - - tc := tc t.Run(tc.desc, func(t *testing.T) { t.Parallel() @@ -165,7 +161,7 @@ func TestLevels(t *testing.T) { t.Fatal(err) } - //check the span. since write spans are 1 value 1, then expected span == tc.writes + // check the span. since write spans are 1 value 1, then expected span == tc.writes sp := binary.LittleEndian.Uint64(rootch.Data()[:swarm.SpanSize]) if sp != uint64(tc.writes) { t.Fatalf("want span %d got %d", tc.writes, sp) @@ -312,7 +308,6 @@ func TestRedundancy(t *testing.T) { parities: 116, // // 87 (full ch) + 29 (2 ref) }, } { - tc := tc t.Run(tc.desc, func(t *testing.T) { t.Parallel() subCtx := redundancy.SetLevelInContext(ctx, tc.level) diff --git a/pkg/file/redundancy/getter/getter_test.go b/pkg/file/redundancy/getter/getter_test.go index 6b22230a60d..42566866a3e 100644 --- a/pkg/file/redundancy/getter/getter_test.go +++ b/pkg/file/redundancy/getter/getter_test.go @@ -310,8 +310,6 @@ func checkShardsAvailable(t *testing.T, s storage.ChunkStore, addrs []swarm.Addr t.Helper() eg, ctx := errgroup.WithContext(context.Background()) for i, addr := range addrs { - i := i - addr := addr eg.Go(func() (err error) { var delay time.Duration var ch swarm.Chunk diff --git a/pkg/hive/hive.go b/pkg/hive/hive.go index a78b863af5b..27858cdcf87 100644 --- a/pkg/hive/hive.go +++ b/pkg/hive/hive.go @@ -110,17 +110,17 @@ func (s *Service) Protocol() p2p.ProtocolSpec { var ErrShutdownInProgress = errors.New("shutdown in progress") func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error { - max := maxBatchSize + maxSize := maxBatchSize s.metrics.BroadcastPeers.Inc() s.metrics.BroadcastPeersPeers.Add(float64(len(peers))) for len(peers) > 0 { - if max > len(peers) { - max = len(peers) + if maxSize > len(peers) { + maxSize = len(peers) } // If broadcasting limit is exceeded, return early - if !s.outLimiter.Allow(addressee.ByteString(), max) { + if !s.outLimiter.Allow(addressee.ByteString(), maxSize) { return nil } @@ -130,11 +130,11 @@ func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, p default: } - if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil { + if err := s.sendPeers(ctx, addressee, peers[:maxSize]); err != nil { return err } - peers = peers[max:] + peers = peers[maxSize:] } return nil @@ -277,13 +277,11 @@ func (s *Service) startCheckPeersHandler() { } func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) { - var peersToAdd []swarm.Address mtx := sync.Mutex{} wg := sync.WaitGroup{} addPeer := func(newPeer *pb.BzzAddress, multiUnderlay ma.Multiaddr) { - err := s.sem.Acquire(ctx, 1) if err != nil { return @@ -332,7 +330,6 @@ func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) { peersToAdd = append(peersToAdd, bzzAddress.Overlay) mtx.Unlock() }() - } for _, p := range peers.Peers { diff --git a/pkg/hive/hive_test.go b/pkg/hive/hive_test.go index 
a908c68b899..fdb903cf7e5 100644 --- a/pkg/hive/hive_test.go +++ b/pkg/hive/hive_test.go @@ -241,7 +241,6 @@ func TestBroadcastPeers(t *testing.T) { } for name, tc := range testCases { - tc := tc t.Run(name, func(t *testing.T) { t.Parallel() @@ -366,7 +365,6 @@ func readAndAssertPeersMsgs(in []byte, expectedLen int) ([]pb.Peers, error) { return new(pb.Peers) }, ) - if err != nil { return nil, err } diff --git a/pkg/jsonhttp/handlers_test.go b/pkg/jsonhttp/handlers_test.go index 7ede319580c..c40e4865949 100644 --- a/pkg/jsonhttp/handlers_test.go +++ b/pkg/jsonhttp/handlers_test.go @@ -173,7 +173,6 @@ func TestNewMaxBodyBytesHandler(t *testing.T) { wantCode: http.StatusRequestEntityTooLarge, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/jsonhttp/jsonhttp_test.go b/pkg/jsonhttp/jsonhttp_test.go index cd6a378d5c5..61a17684e5e 100644 --- a/pkg/jsonhttp/jsonhttp_test.go +++ b/pkg/jsonhttp/jsonhttp_test.go @@ -174,7 +174,6 @@ func TestRespond_special(t *testing.T) { wantMessage: "2.4.8.16", }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/keystore/file/key.go b/pkg/keystore/file/key.go index 68e6a2e2edf..dcbe22612f5 100644 --- a/pkg/keystore/file/key.go +++ b/pkg/keystore/file/key.go @@ -83,7 +83,11 @@ func encryptKey(k *ecdsa.PrivateKey, password string, edg keystore.EDG) ([]byte, } addr = a case elliptic.P256(): - addr = elliptic.Marshal(elliptic.P256(), k.PublicKey.X, k.PublicKey.Y) + privKey, err := k.ECDH() + if err != nil { + return nil, fmt.Errorf("generate key: %w", err) + } + addr = privKey.PublicKey().Bytes() default: return nil, fmt.Errorf("unsupported curve: %v", k.PublicKey.Curve) } diff --git a/pkg/keystore/file/service_test.go b/pkg/keystore/file/service_test.go index 4c115c7f79e..f5f90c3231e 100644 --- a/pkg/keystore/file/service_test.go +++ b/pkg/keystore/file/service_test.go @@ -5,8 +5,13 @@ package file_test import ( + "bytes" + "crypto/elliptic" "testing" + "github.com/btcsuite/btcd/btcec/v2" + ethcrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/keystore/file" "github.com/ethersphere/bee/v2/pkg/keystore/test" ) @@ -14,7 +19,58 @@ import ( func TestService(t *testing.T) { t.Parallel() - dir := t.TempDir() + t.Run("EDGSecp256_K1", func(t *testing.T) { + test.Service(t, file.New(t.TempDir()), crypto.EDGSecp256_K1) + }) - test.Service(t, file.New(dir)) + t.Run("EDGSecp256_R1", func(t *testing.T) { + test.Service(t, file.New(t.TempDir()), crypto.EDGSecp256_R1) + }) +} + +func TestDeprecatedEllipticMarshal(t *testing.T) { + t.Parallel() + + t.Run("EDGSecp256_K1", func(t *testing.T) { + pk, err := crypto.EDGSecp256_K1.Generate() + if err != nil { + t.Fatal(err) + } + + pubBytes := ethcrypto.S256().Marshal(pk.X, pk.Y) + if len(pubBytes) != 65 { + t.Fatalf("public key bytes length mismatch") + } + + // nolint:staticcheck + pubBytesDeprecated := elliptic.Marshal(btcec.S256(), pk.X, pk.Y) + + if !bytes.Equal(pubBytes, pubBytesDeprecated) { + t.Fatalf("public key bytes mismatch") + } + }) + + t.Run("EDGSecp256_R1", func(t *testing.T) { + pk, err := crypto.EDGSecp256_R1.Generate() + if err != nil { + t.Fatal(err) + } + + pkECDH, err := pk.ECDH() + if err != nil { + t.Fatalf("ecdh failed: %v", err) + } + + pubBytes := pkECDH.PublicKey().Bytes() + if len(pubBytes) != 65 { + t.Fatalf("public key bytes length mismatch") + } + + // nolint:staticcheck + pubBytesDeprecated := elliptic.Marshal(elliptic.P256(), pk.X, pk.Y) + + if 
!bytes.Equal(pubBytes, pubBytesDeprecated) { + t.Fatalf("public key bytes mismatch") + } + }) } diff --git a/pkg/keystore/mem/service_test.go b/pkg/keystore/mem/service_test.go index ae3d4640cc0..37a944a059d 100644 --- a/pkg/keystore/mem/service_test.go +++ b/pkg/keystore/mem/service_test.go @@ -7,6 +7,7 @@ package mem_test import ( "testing" + "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/keystore/mem" "github.com/ethersphere/bee/v2/pkg/keystore/test" ) @@ -14,5 +15,11 @@ import ( func TestService(t *testing.T) { t.Parallel() - test.Service(t, mem.New()) + t.Run("EDGSecp256_K1", func(t *testing.T) { + test.Service(t, mem.New(), crypto.EDGSecp256_K1) + }) + + t.Run("EDGSecp256_R1", func(t *testing.T) { + test.Service(t, mem.New(), crypto.EDGSecp256_R1) + }) } diff --git a/pkg/keystore/test/test.go b/pkg/keystore/test/test.go index 5eb4a4c1b26..47309a6afe0 100644 --- a/pkg/keystore/test/test.go +++ b/pkg/keystore/test/test.go @@ -9,13 +9,12 @@ import ( "errors" "testing" - "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/keystore" ) // Service is a utility testing function that can be used to test // implementations of the keystore.Service interface. -func Service(t *testing.T, s keystore.Service) { +func Service(t *testing.T, s keystore.Service, edg keystore.EDG) { t.Helper() exists, err := s.Exists("swarm") @@ -27,7 +26,6 @@ func Service(t *testing.T, s keystore.Service) { t.Fatal("should not exist") } - edg := crypto.EDGSecp256_K1 // create a new swarm key k1, created, err := s.Key("swarm", "pass123456", edg) if err != nil { diff --git a/pkg/log/registry.go b/pkg/log/registry.go index d1b71a46f67..a467cfc39cf 100644 --- a/pkg/log/registry.go +++ b/pkg/log/registry.go @@ -96,10 +96,10 @@ func NewLogger(name string, opts ...Option) Logger { // of verbosity of the given logger. 
func SetVerbosity(l Logger, v Level) error { bl := l.(*logger) - switch newLvl, max := v.get(), Level(bl.v); { + switch newLvl, maxValue := v.get(), Level(bl.v); { case newLvl == VerbosityAll: - bl.setVerbosity(max) - case newLvl > max: + bl.setVerbosity(maxValue) + case newLvl > maxValue: return fmt.Errorf("maximum verbosity %d exceeded for logger: %s", bl.v, bl.id) default: bl.setVerbosity(newLvl) diff --git a/pkg/manifest/mantaray/marshal_test.go b/pkg/manifest/mantaray/marshal_test.go index 48e1ef32420..e0d728602fa 100644 --- a/pkg/manifest/mantaray/marshal_test.go +++ b/pkg/manifest/mantaray/marshal_test.go @@ -7,7 +7,6 @@ package mantaray import ( "bytes" "context" - "encoding/hex" "errors" "reflect" @@ -271,7 +270,6 @@ func Test_UnmarshalBinary(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/manifest/mantaray/node_test.go b/pkg/manifest/mantaray/node_test.go index aa2e2f5c355..b9a8da787d7 100644 --- a/pkg/manifest/mantaray/node_test.go +++ b/pkg/manifest/mantaray/node_test.go @@ -136,7 +136,6 @@ func TestAddAndLookupNode(t *testing.T) { }, } { ctx := context.Background() - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -264,7 +263,6 @@ func TestRemove(t *testing.T) { }, } { ctx := context.Background() - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -305,7 +303,6 @@ func TestRemove(t *testing.T) { t.Fatalf("expected not found error, got %v", err) } } - }) } } @@ -354,7 +351,6 @@ func TestHasPrefix(t *testing.T) { }, } { ctx := context.Background() - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -382,7 +378,6 @@ func TestHasPrefix(t *testing.T) { t.Errorf("expected prefix path %s to be %t, was %t", testPrefix, shouldExist, exists) } } - }) } } diff --git a/pkg/manifest/mantaray/persist.go b/pkg/manifest/mantaray/persist.go index 2bd07d74c94..9c08769a1d0 100644 --- a/pkg/manifest/mantaray/persist.go +++ b/pkg/manifest/mantaray/persist.go @@ -71,7 +71,6 @@ func (n *Node) save(ctx context.Context, s Saver) error { } eg, ectx := errgroup.WithContext(ctx) for _, f := range n.forks { - f := f eg.Go(func() error { return f.Node.save(ectx, s) }) diff --git a/pkg/manifest/mantaray/walker_test.go b/pkg/manifest/mantaray/walker_test.go index e49d72364aa..f6d72687a39 100644 --- a/pkg/manifest/mantaray/walker_test.go +++ b/pkg/manifest/mantaray/walker_test.go @@ -50,8 +50,6 @@ func TestWalkNode(t *testing.T) { }, } { ctx := context.Background() - tc := tc - createTree := func(t *testing.T, toAdd [][]byte) *mantaray.Node { t.Helper() @@ -87,7 +85,6 @@ func TestWalkNode(t *testing.T) { walkedCount := 0 walker := func(path []byte, node *mantaray.Node, err error) error { - if !pathExistsInRightSequence(path, tc.expected, walkedCount) { return fmt.Errorf("walkFn returned unexpected path: %s", path) } @@ -123,7 +120,6 @@ func TestWalkNode(t *testing.T) { walkedCount := 0 walker := func(path []byte, node *mantaray.Node, err error) error { - if !pathExistsInRightSequence(path, tc.expected, walkedCount) { return fmt.Errorf("walkFn returned unexpected path: %s", path) } diff --git a/pkg/manifest/simple/manifest_test.go b/pkg/manifest/simple/manifest_test.go index 4b7f160b603..e34616107ed 100644 --- a/pkg/manifest/simple/manifest_test.go +++ b/pkg/manifest/simple/manifest_test.go @@ -100,7 +100,6 @@ func TestEntries(t *testing.T) { t.Parallel() for _, tc := range makeTestCases(t) { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -164,7 +163,6 @@ func TestEntries(t *testing.T) { 
checkLength(t, m, manifestLen-i-1) } - }) } } @@ -198,7 +196,6 @@ func TestMarshal(t *testing.T) { t.Parallel() for _, tc := range makeTestCases(t) { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -271,7 +268,6 @@ func TestHasPrefix(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -294,7 +290,6 @@ func TestHasPrefix(t *testing.T) { t.Errorf("expected prefix path %s to be %t, was %t", testPrefix, shouldExist, exists) } } - }) } } diff --git a/pkg/manifest/simple/walker_test.go b/pkg/manifest/simple/walker_test.go index c3877ce664e..a0159465d44 100644 --- a/pkg/manifest/simple/walker_test.go +++ b/pkg/manifest/simple/walker_test.go @@ -15,7 +15,6 @@ func TestWalkEntry(t *testing.T) { t.Parallel() for _, tc := range makeTestCases(t) { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/node/node.go b/pkg/node/node.go index c8c435d8a4a..9c7b433948f 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -911,7 +911,7 @@ func NewBee( return nil, fmt.Errorf("status service: %w", err) } - saludService := salud.New(nodeStatus, kad, localStore, logger, warmupTime, api.FullMode.String(), salud.DefaultMinPeersPerBin, salud.DefaultDurPercentile, salud.DefaultConnsPercentile, uint8(o.ReserveCapacityDoubling)) + saludService := salud.New(nodeStatus, kad, localStore, logger, warmupTime, api.FullMode.String(), salud.DefaultMinPeersPerBin, salud.DefaultDurPercentile, salud.DefaultConnsPercentile) b.saludCloser = saludService rC, unsub := saludService.SubscribeNetworkStorageRadius() @@ -1090,7 +1090,6 @@ func NewBee( transactionService, saludService, logger, - uint8(o.ReserveCapacityDoubling), ) if err != nil { return nil, fmt.Errorf("storage incentives agent: %w", err) diff --git a/pkg/p2p/libp2p/internal/breaker/breaker_test.go b/pkg/p2p/libp2p/internal/breaker/breaker_test.go index 2693037abbf..35e33222df4 100644 --- a/pkg/p2p/libp2p/internal/breaker/breaker_test.go +++ b/pkg/p2p/libp2p/internal/breaker/breaker_test.go @@ -73,7 +73,6 @@ func TestExecute(t *testing.T) { } for name, tc := range testCases { - tc := tc t.Run(name, func(t *testing.T) { t.Parallel() diff --git a/pkg/p2p/libp2p/internal/reacher/reacher_test.go b/pkg/p2p/libp2p/internal/reacher/reacher_test.go index a9cfa005ed5..73ba1842b2d 100644 --- a/pkg/p2p/libp2p/internal/reacher/reacher_test.go +++ b/pkg/p2p/libp2p/internal/reacher/reacher_test.go @@ -61,7 +61,6 @@ func TestPingSuccess(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/p2p/libp2p/libp2p.go b/pkg/p2p/libp2p/libp2p.go index da620430fb6..9055cdd8fc8 100644 --- a/pkg/p2p/libp2p/libp2p.go +++ b/pkg/p2p/libp2p/libp2p.go @@ -556,7 +556,6 @@ func (s *Service) SetPickyNotifier(n p2p.PickyNotifier) { func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) { for _, ss := range p.StreamSpecs { - ss := ss id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name)) matcher, err := s.protocolSemverMatcher(id) if err != nil { diff --git a/pkg/p2p/libp2p/static_resolver_test.go b/pkg/p2p/libp2p/static_resolver_test.go index 4aff2ca41c6..c45cce0142f 100644 --- a/pkg/p2p/libp2p/static_resolver_test.go +++ b/pkg/p2p/libp2p/static_resolver_test.go @@ -82,7 +82,6 @@ func TestStaticAddressResolver(t *testing.T) { want: "/dns/ipv4and6.com/tcp/30777/p2p/16Uiu2HAkyyGKpjBiCkVqCKoJa6RzzZw9Nr7hGogsMPcdad1KyMmd", }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/p2p/protobuf/protobuf_test.go 
b/pkg/p2p/protobuf/protobuf_test.go index 5dd675b0687..3a1edf30bf5 100644 --- a/pkg/p2p/protobuf/protobuf_test.go +++ b/pkg/p2p/protobuf/protobuf_test.go @@ -44,7 +44,6 @@ func TestReader_ReadMsg(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -98,7 +97,6 @@ func TestReader_timeout(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -164,7 +162,6 @@ func TestWriter(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -210,7 +207,6 @@ func TestWriter_timeout(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name+"WithContext", func(t *testing.T) { t.Parallel() diff --git a/pkg/postage/postagecontract/contract.go b/pkg/postage/postagecontract/contract.go index b454f249c11..b0e5d06908a 100644 --- a/pkg/postage/postagecontract/contract.go +++ b/pkg/postage/postagecontract/contract.go @@ -170,7 +170,7 @@ func (c *postageContract) sendApproveTransaction(ctx context.Context, amount *bi To: &c.bzzTokenAddress, Data: callData, GasPrice: sctx.GetGasPrice(ctx), - GasLimit: 65000, + GasLimit: max(sctx.GetGasLimit(ctx), c.gasLimit), Value: big.NewInt(0), Description: approveDescription, } diff --git a/pkg/postage/stampissuer_test.go b/pkg/postage/stampissuer_test.go index 3a72b57f3d6..460e1f20458 100644 --- a/pkg/postage/stampissuer_test.go +++ b/pkg/postage/stampissuer_test.go @@ -145,8 +145,6 @@ func TestStampItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -249,7 +247,6 @@ func TestUtilization(t *testing.T) { t.Logf("depth: %d, actual utilization: %f", depth, float64(count)/math.Pow(2, float64(depth))) } - } func bytesToIndex(buf []byte) (bucket, index uint32) { diff --git a/pkg/puller/puller_test.go b/pkg/puller/puller_test.go index 80ae02243f0..29687138b9c 100644 --- a/pkg/puller/puller_test.go +++ b/pkg/puller/puller_test.go @@ -35,7 +35,8 @@ func TestOneSync(t *testing.T) { cursors = []uint64{1000, 1000, 1000} replies = []mockps.SyncReply{ {Bin: 1, Start: 1, Topmost: 1000, Peer: addr}, - {Bin: 2, Start: 1, Topmost: 1001, Peer: addr}} + {Bin: 2, Start: 1, Topmost: 1001, Peer: addr}, + } ) _, _, kad, pullsync := newPuller(t, opts{ @@ -66,7 +67,8 @@ func TestSyncOutsideDepth(t *testing.T) { replies = []mockps.SyncReply{ {Bin: 0, Start: 1, Topmost: 1000, Peer: addr2}, {Bin: 2, Start: 1, Topmost: 1000, Peer: addr}, - {Bin: 3, Start: 1, Topmost: 1000, Peer: addr}} + {Bin: 3, Start: 1, Topmost: 1000, Peer: addr}, + } ) _, _, kad, pullsync := newPuller(t, opts{ @@ -177,7 +179,6 @@ func TestSyncIntervals(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -474,9 +475,7 @@ func TestRadiusIncrease(t *testing.T) { func TestContinueSyncing(t *testing.T) { t.Parallel() - var ( - addr = swarm.RandAddress(t) - ) + addr := swarm.RandAddress(t) _, _, kad, pullsync := newPuller(t, opts{ kad: []kadMock.Option{ @@ -515,7 +514,8 @@ func TestPeerGone(t *testing.T) { addr = swarm.RandAddress(t) replies = []mockps.SyncReply{ {Bin: 0, Start: 1, Topmost: 1001, Peer: addr}, - {Bin: 1, Start: 1, Topmost: 1001, Peer: addr}} + {Bin: 1, Start: 1, Topmost: 1001, Peer: addr}, + } ) p, _, kad, pullsync := newPuller(t, opts{ diff --git a/pkg/replicas/getter_test.go b/pkg/replicas/getter_test.go index f300637c891..d1d727dd5fd 100644 --- a/pkg/replicas/getter_test.go +++ b/pkg/replicas/getter_test.go @@ -181,7 +181,6 @@ func TestGetter(t *testing.T) { } // if j <= c, the original chunk 
should be retrieved and the context should be cancelled t.Run("retrievals cancelled", func(t *testing.T) { - select { case <-time.After(100 * time.Millisecond): t.Fatal("timed out waiting for context to be cancelled") @@ -233,17 +232,16 @@ func TestGetter(t *testing.T) { } return } - max := 2 - for i := 1; i < tc.level && max < tc.found; i++ { - max = max * 2 + maxValue := 2 + for i := 1; i < tc.level && maxValue < tc.found; i++ { + maxValue = maxValue * 2 } - if attempts > max { - t.Fatalf("too many attempts to retrieve a replica: want at most %v. got %v. latencies %v", max, attempts, latencies) + if attempts > maxValue { + t.Fatalf("too many attempts to retrieve a replica: want at most %v. got %v. latencies %v", maxValue, attempts, latencies) } }) t.Run("dispersion", func(t *testing.T) { - if err := dispersed(redundancy.Level(tc.level), ch, addresses); err != nil { t.Fatalf("addresses are not dispersed: %v", err) } diff --git a/pkg/replicas/putter.go b/pkg/replicas/putter.go index 3404bca5f03..017faae5b9e 100644 --- a/pkg/replicas/putter.go +++ b/pkg/replicas/putter.go @@ -40,7 +40,6 @@ func (p *putter) Put(ctx context.Context, ch swarm.Chunk) (err error) { errc := make(chan error, rlevel.GetReplicaCount()) wg := sync.WaitGroup{} for r := range rr.c { - r := r wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/resolver/client/ens/ens_test.go b/pkg/resolver/client/ens/ens_test.go index aa0d0c0cb64..77253d786ba 100644 --- a/pkg/resolver/client/ens/ens_test.go +++ b/pkg/resolver/client/ens/ens_test.go @@ -53,7 +53,6 @@ func TestNewENSClient(t *testing.T) { }, } for _, tC := range testCases { - tC := tC t.Run(tC.desc, func(t *testing.T) { t.Parallel() @@ -186,7 +185,6 @@ func TestResolve(t *testing.T) { }, } for _, tC := range testCases { - tC := tC t.Run(tC.desc, func(t *testing.T) { t.Parallel() diff --git a/pkg/resolver/multiresolver/config_test.go b/pkg/resolver/multiresolver/config_test.go index 9b4c2d502c2..a1195532ec9 100644 --- a/pkg/resolver/multiresolver/config_test.go +++ b/pkg/resolver/multiresolver/config_test.go @@ -118,7 +118,6 @@ func TestParseConnectionStrings(t *testing.T) { }, } for _, tC := range testCases { - tC := tC t.Run(tC.desc, func(t *testing.T) { t.Parallel() diff --git a/pkg/resolver/multiresolver/multiresolver_test.go b/pkg/resolver/multiresolver/multiresolver_test.go index 326cbb69a04..c1f9ba0311a 100644 --- a/pkg/resolver/multiresolver/multiresolver_test.go +++ b/pkg/resolver/multiresolver/multiresolver_test.go @@ -76,7 +76,6 @@ func TestPushResolver(t *testing.T) { } for _, tC := range testCases { - tC := tC t.Run(tC.desc, func(t *testing.T) { t.Parallel() @@ -241,7 +240,6 @@ func TestResolve(t *testing.T) { } for _, tC := range testCases { - tC := tC t.Run(tC.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/salud/salud.go b/pkg/salud/salud.go index 397362499b4..d47abf3fd76 100644 --- a/pkg/salud/salud.go +++ b/pkg/salud/salud.go @@ -52,8 +52,6 @@ type service struct { radiusSubsMtx sync.Mutex radiusC []chan uint8 - - capacityDoubling uint8 } func New( @@ -66,20 +64,18 @@ func New( minPeersPerbin int, durPercentile float64, connsPercentile float64, - capacityDoubling uint8, ) *service { metrics := newMetrics() s := &service{ - quit: make(chan struct{}), - logger: logger.WithName(loggerName).Register(), - status: status, - topology: topology, - metrics: metrics, - isSelfHealthy: atomic.NewBool(true), - reserve: reserve, - capacityDoubling: capacityDoubling, + quit: make(chan struct{}), + logger: logger.WithName(loggerName).Register(), + status: 
status, + topology: topology, + metrics: metrics, + isSelfHealthy: atomic.NewBool(true), + reserve: reserve, } s.wg.Add(1) @@ -173,7 +169,7 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, return } - networkRadius, nHoodRadius := s.radius(peers) + networkRadius, nHoodRadius := s.committedDepth(peers) avgDur := totaldur / float64(len(peers)) pDur := percentileDur(peers, durPercentile) pConns := percentileConns(peers, connsPercentile) @@ -199,8 +195,8 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, continue } - if networkRadius > 0 && peer.status.StorageRadius < uint32(networkRadius-2) { - s.logger.Debug("radius health failure", "radius", peer.status.StorageRadius, "peer_address", peer.addr) + if networkRadius > 0 && peer.status.CommittedDepth < uint32(networkRadius-2) { + s.logger.Debug("radius health failure", "radius", peer.status.CommittedDepth, "peer_address", peer.addr) } else if peer.dur.Seconds() > pDur { s.logger.Debug("response duration below threshold", "duration", peer.dur, "peer_address", peer.addr) } else if peer.status.ConnectedPeers < pConns { @@ -220,12 +216,10 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, } } - networkRadiusEstimation := s.reserve.StorageRadius() + s.capacityDoubling - selfHealth := true - if nHoodRadius == networkRadius && networkRadiusEstimation != networkRadius { + if nHoodRadius == networkRadius && s.reserve.CommittedDepth() != networkRadius { selfHealth = false - s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", networkRadiusEstimation, "network_radius", networkRadius) + s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", s.reserve.CommittedDepth(), "network_radius", networkRadius) } s.isSelfHealthy.Store(selfHealth) @@ -294,24 +288,24 @@ func percentileConns(peers []peer, p float64) uint64 { } // radius finds the most common radius. -func (s *service) radius(peers []peer) (uint8, uint8) { +func (s *service) committedDepth(peers []peer) (uint8, uint8) { - var networkRadius [swarm.MaxBins]int - var nHoodRadius [swarm.MaxBins]int + var networkDepth [swarm.MaxBins]int + var nHoodDepth [swarm.MaxBins]int for _, peer := range peers { - if peer.status.StorageRadius < uint32(swarm.MaxBins) { + if peer.status.CommittedDepth < uint32(swarm.MaxBins) { if peer.neighbor { - nHoodRadius[peer.status.StorageRadius]++ + nHoodDepth[peer.status.CommittedDepth]++ } - networkRadius[peer.status.StorageRadius]++ + networkDepth[peer.status.CommittedDepth]++ } } - networkR := maxIndex(networkRadius[:]) - hoodR := maxIndex(nHoodRadius[:]) + networkD := maxIndex(networkDepth[:]) + hoodD := maxIndex(nHoodDepth[:]) - return uint8(networkR), uint8(hoodR) + return uint8(networkD), uint8(hoodD) } // commitment finds the most common batch commitment. 
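The pkg/salud/salud.go hunk above replaces the storage-radius consensus with a committed-depth one: the most common CommittedDepth reported by peers becomes the network depth, a peer whose CommittedDepth sits more than 2 below it fails the radius check, and the node marks itself unhealthy when the neighborhood and network depths agree but its own reserve.CommittedDepth() (storage radius plus capacity doubling, per the pkg/storer changes further down) does not. A minimal sketch of that selection logic, assuming hypothetical stand-in types (peerSnapshot, maxBins) rather than the real status.Snapshot and reserve interfaces:

package main

import "fmt"

const maxBins = 32

// peerSnapshot is a hypothetical stand-in for the fields salud reads from status.Snapshot.
type peerSnapshot struct {
	CommittedDepth uint32
	Neighbor       bool
}

// mostCommonDepth mirrors salud's committedDepth(): it returns the most frequent
// committed depth across all peers and across neighborhood peers only.
func mostCommonDepth(peers []peerSnapshot) (network, nhood uint8) {
	var networkDepth, nhoodDepth [maxBins]int
	for _, p := range peers {
		if p.CommittedDepth < maxBins {
			networkDepth[p.CommittedDepth]++
			if p.Neighbor {
				nhoodDepth[p.CommittedDepth]++
			}
		}
	}
	return uint8(maxIndex(networkDepth[:])), uint8(maxIndex(nhoodDepth[:]))
}

// maxIndex returns the index holding the largest count.
func maxIndex(counts []int) int {
	idx, best := 0, 0
	for i, c := range counts {
		if c > best {
			idx, best = i, c
		}
	}
	return idx
}

func main() {
	peers := []peerSnapshot{
		{CommittedDepth: 8, Neighbor: true},
		{CommittedDepth: 8, Neighbor: true},
		{CommittedDepth: 7, Neighbor: false},
		{CommittedDepth: 8, Neighbor: false},
	}
	network, nhood := mostCommonDepth(peers)

	// Self committed depth = storage radius + capacity doubling (e.g. 6 + 2).
	self := uint8(6 + 2)

	// Unhealthy only when neighborhood and network agree but the node's own depth differs.
	healthy := !(nhood == network && self != network)
	fmt.Println("network depth:", network, "neighborhood depth:", nhood, "self healthy:", healthy)
}

Folding the doubling into CommittedDepth() is also what lets salud.New drop its separate capacityDoubling parameter, as the salud_test.go changes below reflect.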
diff --git a/pkg/salud/salud_test.go b/pkg/salud/salud_test.go index 5fc4dda733d..e430bf1c868 100644 --- a/pkg/salud/salud_test.go +++ b/pkg/salud/salud_test.go @@ -31,28 +31,28 @@ func TestSalud(t *testing.T) { t.Parallel() peers := []peer{ // fully healhy - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true}, // healthy since radius >= most common radius - 2 - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 7, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 7, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 7}, 1, true}, // radius too low - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 5, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, false}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 5, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 5}, 1, false}, // dur too long - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 2, false}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 2, false}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 2, false}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 2, false}, // connections not enough - {swarm.RandAddress(t), 
&status.Snapshot{ConnectedPeers: 90, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, false}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 90, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, false}, // commitment wrong - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 35, ReserveSize: 100}, 1, false}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 35, ReserveSize: 100, CommittedDepth: 8}, 1, false}, } statusM := &statusMock{make(map[string]peer)} @@ -66,11 +66,12 @@ func TestSalud(t *testing.T) { topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...)) reserve := mockstorer.NewReserve( - mockstorer.WithRadius(8), + mockstorer.WithRadius(6), mockstorer.WithReserveSize(100), + mockstorer.WithCapacityDoubling(2), ) - service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8, 0) + service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8) err := spinlock.Wait(time.Minute, func() bool { return len(topM.PeersHealth()) == len(peers) @@ -114,9 +115,10 @@ func TestSelfUnhealthyRadius(t *testing.T) { reserve := mockstorer.NewReserve( mockstorer.WithRadius(7), mockstorer.WithReserveSize(100), + mockstorer.WithCapacityDoubling(0), ) - service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8, 0) + service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8) testutil.CleanupCloser(t, service) err := spinlock.Wait(time.Minute, func() bool { @@ -135,8 +137,8 @@ func TestSelfHealthyCapacityDoubling(t *testing.T) { t.Parallel() peers := []peer{ // fully healhy - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full"}, 0, true}, - {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full"}, 0, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", CommittedDepth: 8}, 0, true}, + {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", CommittedDepth: 8}, 0, true}, } statusM := &statusMock{make(map[string]peer)} @@ -151,9 +153,10 @@ func TestSelfHealthyCapacityDoubling(t *testing.T) { reserve := mockstorer.NewReserve( mockstorer.WithRadius(6), mockstorer.WithReserveSize(100), + mockstorer.WithCapacityDoubling(2), ) - service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8, 2) + service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8) testutil.CleanupCloser(t, service) err := spinlock.Wait(time.Minute, func() bool { @@ -183,7 +186,7 @@ func TestSubToRadius(t *testing.T) { topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...)) - service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8, 0) + service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8) c, unsub := service.SubscribeNetworkStorageRadius() t.Cleanup(unsub) @@ -216,7 +219,7 @@ func TestUnsub(t *testing.T) { topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...)) - service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8, 0) + service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 
0, 0.8, 0.8) testutil.CleanupCloser(t, service) c, unsub := service.SubscribeNetworkStorageRadius() diff --git a/pkg/settlement/swap/chequebook/export_test.go b/pkg/settlement/swap/chequebook/export_test.go index 008586bc0a8..5a274840522 100644 --- a/pkg/settlement/swap/chequebook/export_test.go +++ b/pkg/settlement/swap/chequebook/export_test.go @@ -1,3 +1,6 @@ +// Copyright 2021 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package chequebook var ( diff --git a/pkg/settlement/swap/export_test.go b/pkg/settlement/swap/export_test.go index fbea8817753..da949633b15 100644 --- a/pkg/settlement/swap/export_test.go +++ b/pkg/settlement/swap/export_test.go @@ -1,3 +1,6 @@ +// Copyright 2021 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package swap var ( diff --git a/pkg/sharky/shard_test.go b/pkg/sharky/shard_test.go index 567a0cf9ad3..f816f69c698 100644 --- a/pkg/sharky/shard_test.go +++ b/pkg/sharky/shard_test.go @@ -31,7 +31,6 @@ func TestLocationSerialization(t *testing.T) { Length: math.MaxUint16, }, } { - tc := tc t.Run(fmt.Sprintf("%d_%d_%d", tc.Shard, tc.Slot, tc.Length), func(t *testing.T) { t.Parallel() diff --git a/pkg/sharky/sharky_test.go b/pkg/sharky/sharky_test.go index 1e3aadd78b4..268a996e7c0 100644 --- a/pkg/sharky/sharky_test.go +++ b/pkg/sharky/sharky_test.go @@ -27,7 +27,7 @@ type dirFS struct { } func (d *dirFS) Open(path string) (fs.File, error) { - return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) + return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0o644) } func TestSingleRetrieval(t *testing.T) { @@ -73,7 +73,6 @@ func TestSingleRetrieval(t *testing.T) { nil, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { cctx, cancel := context.WithTimeout(ctx, 800*time.Millisecond) defer cancel() @@ -83,7 +82,6 @@ func TestSingleRetrieval(t *testing.T) { } if err != nil { return - } buf := make([]byte, datasize) err = s.Read(ctx, loc, buf) @@ -188,7 +186,6 @@ func TestConcurrency(t *testing.T) { eg, ectx := errgroup.WithContext(ctx) // a number of workers write sequential numbers to sharky for k := 0; k < workers; k++ { - k := k eg.Go(func() error { <-start buf := make([]byte, 4) @@ -287,7 +284,6 @@ func TestConcurrency(t *testing.T) { {32, 8, 32}, {64, 32, 64}, } { - c := c t.Run(fmt.Sprintf("workers:%d,shards:%d,size:%d", c.workers, c.shards, c.shardSize), func(t *testing.T) { t.Parallel() test(t, c.workers, c.shards, c.shardSize) diff --git a/pkg/soc/soc.go b/pkg/soc/soc.go index 65b44f2e66e..bf4711cee61 100644 --- a/pkg/soc/soc.go +++ b/pkg/soc/soc.go @@ -130,6 +130,22 @@ func (s *SOC) Sign(signer crypto.Signer) (swarm.Chunk, error) { return s.Chunk() } +// UnwrapCAC extracts the CAC inside the SOC. +func UnwrapCAC(sch swarm.Chunk) (swarm.Chunk, error) { + chunkData := sch.Data() + if len(chunkData) < swarm.SocMinChunkSize { + return nil, errWrongChunkSize + } + + cursor := swarm.HashSize + swarm.SocSignatureSize + ch, err := cac.NewWithDataSpan(chunkData[cursor:]) + if err != nil { + return nil, err + } + + return ch, nil +} + // FromChunk recreates a SOC representation from swarm.Chunk data. 
func FromChunk(sch swarm.Chunk) (*SOC, error) { chunkData := sch.Data() diff --git a/pkg/soc/soc_test.go b/pkg/soc/soc_test.go index 6346242fa1b..cbf5c341904 100644 --- a/pkg/soc/soc_test.go +++ b/pkg/soc/soc_test.go @@ -327,6 +327,15 @@ func TestFromChunk(t *testing.T) { if !ch.Equal(recoveredSOC.WrappedChunk()) { t.Fatalf("wrapped chunk mismatch. got %s want %s", recoveredSOC.WrappedChunk().Address(), ch.Address()) } + + unwrapped, err := soc.UnwrapCAC(sch) + if err != nil { + t.Fatal(err) + } + + if !ch.Equal(unwrapped) { + t.Fatalf("wrapped chunk mismatch. got %s want %s", recoveredSOC.WrappedChunk().Address(), ch.Address()) + } } func TestCreateAddress(t *testing.T) { diff --git a/pkg/soc/validator_test.go b/pkg/soc/validator_test.go index e6913000149..203e0b1bc84 100644 --- a/pkg/soc/validator_test.go +++ b/pkg/soc/validator_test.go @@ -173,7 +173,6 @@ func TestInvalid(t *testing.T) { }, }, } { - c := c t.Run(c.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/status/internal/pb/status.pb.go b/pkg/status/internal/pb/status.pb.go index fb057b77ae8..d5cd8454235 100644 --- a/pkg/status/internal/pb/status.pb.go +++ b/pkg/status/internal/pb/status.pb.go @@ -74,6 +74,7 @@ type Snapshot struct { IsReachable bool `protobuf:"varint,8,opt,name=IsReachable,proto3" json:"IsReachable,omitempty"` ReserveSizeWithinRadius uint64 `protobuf:"varint,9,opt,name=ReserveSizeWithinRadius,proto3" json:"ReserveSizeWithinRadius,omitempty"` LastSyncedBlock uint64 `protobuf:"varint,10,opt,name=LastSyncedBlock,proto3" json:"LastSyncedBlock,omitempty"` + CommittedDepth uint32 `protobuf:"varint,11,opt,name=CommittedDepth,proto3" json:"CommittedDepth,omitempty"` } func (m *Snapshot) Reset() { *m = Snapshot{} } @@ -179,6 +180,13 @@ func (m *Snapshot) GetLastSyncedBlock() uint64 { return 0 } +func (m *Snapshot) GetCommittedDepth() uint32 { + if m != nil { + return m.CommittedDepth + } + return 0 +} + func init() { proto.RegisterType((*Get)(nil), "status.Get") proto.RegisterType((*Snapshot)(nil), "status.Snapshot") @@ -187,27 +195,28 @@ func init() { func init() { proto.RegisterFile("status.proto", fileDescriptor_dfe4fce6682daf5b) } var fileDescriptor_dfe4fce6682daf5b = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd1, 0xbd, 0x4a, 0x03, 0x41, - 0x10, 0x07, 0xf0, 0x6c, 0xbe, 0x33, 0x26, 0x2a, 0xdb, 0xb8, 0x85, 0x1c, 0x47, 0x10, 0x39, 0x2c, - 0x6c, 0x6c, 0xac, 0x2f, 0x85, 0x08, 0x2a, 0x61, 0xaf, 0x10, 0xec, 0xf6, 0xee, 0x86, 0xdc, 0xe1, - 0x65, 0x37, 0xdc, 0x4e, 0x84, 0xf8, 0x14, 0x16, 0x3e, 0x94, 0x65, 0x4a, 0x4b, 0x49, 0x5e, 0x44, - 0xb2, 0x51, 0x48, 0x4e, 0x2c, 0xe7, 0xb7, 0xcb, 0xec, 0x7f, 0x67, 0xa0, 0x6f, 0x49, 0xd1, 0xdc, - 0x5e, 0xce, 0x4a, 0x43, 0x86, 0xb7, 0xb7, 0xd5, 0xb0, 0x05, 0x8d, 0x1b, 0xa4, 0xe1, 0x7b, 0x03, - 0xba, 0x91, 0x56, 0x33, 0x9b, 0x19, 0xe2, 0x3e, 0x1c, 0x48, 0xb4, 0x58, 0xbe, 0x60, 0x94, 0xbf, - 0xa2, 0x60, 0x3e, 0x0b, 0x9a, 0x72, 0x97, 0xf8, 0x10, 0xfa, 0xe3, 0x79, 0x51, 0xd8, 0x85, 0x4e, - 0xa4, 0x22, 0x14, 0x75, 0x9f, 0x05, 0x4c, 0xee, 0x19, 0x3f, 0x83, 0x41, 0x44, 0xa6, 0x54, 0x13, - 0x94, 0x2a, 0xcd, 0xe7, 0x56, 0x34, 0x7c, 0x16, 0x0c, 0xe4, 0x3e, 0xf2, 0x73, 0x38, 0x1c, 0x19, - 0xad, 0x31, 0x21, 0x4c, 0xc7, 0x88, 0xa5, 0x15, 0x4d, 0xf7, 0x5c, 0x45, 0xf9, 0x05, 0x1c, 0x3f, - 0x60, 0x3e, 0xc9, 0x62, 0x53, 0x66, 0xc6, 0xa4, 0x2e, 0x58, 0xcb, 0xdd, 0xfc, 0xe3, 0x5c, 0x40, - 0x27, 0x44, 0xbc, 0x37, 0x29, 0x8a, 0xb6, 0xcf, 0x82, 0x9e, 0xfc, 0x2d, 0x79, 0x00, 0x47, 0xa1, - 0xa2, 0x24, 0x1b, 0x99, 0xe9, 0x34, 
0xa7, 0x29, 0x6a, 0x12, 0x1d, 0xd7, 0xa4, 0xca, 0x9b, 0x19, - 0xdc, 0x5a, 0x89, 0x2a, 0xc9, 0x54, 0x5c, 0xa0, 0xe8, 0xfa, 0x2c, 0xe8, 0xca, 0x5d, 0xe2, 0xd7, - 0x70, 0xb2, 0x33, 0x92, 0xc7, 0x9c, 0xb2, 0x5c, 0xff, 0xfc, 0xb4, 0xe7, 0x7a, 0xfe, 0x77, 0xbc, - 0x49, 0x71, 0xa7, 0x2c, 0x45, 0x0b, 0x9d, 0x60, 0x1a, 0x16, 0x26, 0x79, 0x16, 0xb0, 0x4d, 0x51, - 0xe1, 0xf0, 0xf4, 0x63, 0xe5, 0xb1, 0xe5, 0xca, 0x63, 0x5f, 0x2b, 0x8f, 0xbd, 0xad, 0xbd, 0xda, - 0x72, 0xed, 0xd5, 0x3e, 0xd7, 0x5e, 0xed, 0xa9, 0x3e, 0x8b, 0xe3, 0xb6, 0x5b, 0xe5, 0xd5, 0x77, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x92, 0x33, 0xb8, 0x1e, 0xda, 0x01, 0x00, 0x00, + // 332 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd1, 0x4f, 0x4b, 0xc3, 0x30, + 0x18, 0x06, 0xf0, 0x65, 0xff, 0x97, 0x6d, 0x2a, 0xb9, 0x98, 0x83, 0x94, 0x32, 0x44, 0x8a, 0x07, + 0x2f, 0x5e, 0x3c, 0x77, 0x82, 0x08, 0x2a, 0x23, 0x3d, 0x08, 0xde, 0xd2, 0xf6, 0x65, 0x29, 0x76, + 0x49, 0x69, 0xde, 0x09, 0xf3, 0x53, 0xf8, 0x81, 0xfc, 0x00, 0x1e, 0x77, 0xf4, 0x28, 0xdb, 0x17, + 0x91, 0x65, 0x0a, 0x5b, 0xc5, 0xe3, 0xfb, 0x6b, 0x79, 0xf3, 0xe4, 0x09, 0x1d, 0x58, 0x94, 0x38, + 0xb7, 0x17, 0x45, 0x69, 0xd0, 0xb0, 0xf6, 0x76, 0x1a, 0xb5, 0x68, 0xe3, 0x06, 0x70, 0xf4, 0xde, + 0xa0, 0xdd, 0x48, 0xcb, 0xc2, 0x2a, 0x83, 0xcc, 0xa7, 0x7d, 0x01, 0x16, 0xca, 0x17, 0x88, 0xb2, + 0x57, 0xe0, 0xc4, 0x27, 0x41, 0x53, 0xec, 0x12, 0x1b, 0xd1, 0xc1, 0x64, 0x9e, 0xe7, 0x76, 0xa1, + 0x13, 0x21, 0x11, 0x78, 0xdd, 0x27, 0x01, 0x11, 0x7b, 0xc6, 0x4e, 0xe9, 0x30, 0x42, 0x53, 0xca, + 0x29, 0x08, 0x99, 0x66, 0x73, 0xcb, 0x1b, 0x3e, 0x09, 0x86, 0x62, 0x1f, 0xd9, 0x19, 0x3d, 0x18, + 0x1b, 0xad, 0x21, 0x41, 0x48, 0x27, 0x00, 0xa5, 0xe5, 0x4d, 0x77, 0x5c, 0x45, 0xd9, 0x39, 0x3d, + 0x7a, 0x80, 0x6c, 0xaa, 0x62, 0x53, 0x2a, 0x63, 0x52, 0x17, 0xac, 0xe5, 0xfe, 0xfc, 0xe3, 0x8c, + 0xd3, 0x4e, 0x08, 0x70, 0x6f, 0x52, 0xe0, 0x6d, 0x9f, 0x04, 0x3d, 0xf1, 0x3b, 0xb2, 0x80, 0x1e, + 0x86, 0x12, 0x13, 0x35, 0x36, 0xb3, 0x59, 0x86, 0x33, 0xd0, 0xc8, 0x3b, 0x6e, 0x49, 0x95, 0x37, + 0x1d, 0xdc, 0x5a, 0x01, 0x32, 0x51, 0x32, 0xce, 0x81, 0x77, 0x7d, 0x12, 0x74, 0xc5, 0x2e, 0xb1, + 0x2b, 0x7a, 0xbc, 0x53, 0xc9, 0x63, 0x86, 0x2a, 0xd3, 0x3f, 0x37, 0xed, 0xb9, 0x9d, 0xff, 0x7d, + 0xde, 0xa4, 0xb8, 0x93, 0x16, 0xa3, 0x85, 0x4e, 0x20, 0x0d, 0x73, 0x93, 0x3c, 0x73, 0xba, 0x4d, + 0x51, 0xe1, 0x6d, 0x3b, 0x9b, 0x4c, 0x08, 0xe9, 0x35, 0x14, 0xa8, 0x78, 0xdf, 0x95, 0x58, 0xd1, + 0xf0, 0xe4, 0x63, 0xe5, 0x91, 0xe5, 0xca, 0x23, 0x5f, 0x2b, 0x8f, 0xbc, 0xad, 0xbd, 0xda, 0x72, + 0xed, 0xd5, 0x3e, 0xd7, 0x5e, 0xed, 0xa9, 0x5e, 0xc4, 0x71, 0xdb, 0x3d, 0xf9, 0xe5, 0x77, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x97, 0x7e, 0x47, 0xd4, 0x02, 0x02, 0x00, 0x00, } func (m *Get) Marshal() (dAtA []byte, err error) { @@ -253,6 +262,11 @@ func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.CommittedDepth != 0 { + i = encodeVarintStatus(dAtA, i, uint64(m.CommittedDepth)) + i-- + dAtA[i] = 0x58 + } if m.LastSyncedBlock != 0 { i = encodeVarintStatus(dAtA, i, uint64(m.LastSyncedBlock)) i-- @@ -371,6 +385,9 @@ func (m *Snapshot) Size() (n int) { if m.LastSyncedBlock != 0 { n += 1 + sovStatus(uint64(m.LastSyncedBlock)) } + if m.CommittedDepth != 0 { + n += 1 + sovStatus(uint64(m.CommittedDepth)) + } return n } @@ -658,6 +675,25 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { break } } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommittedDepth", wireType) + } + m.CommittedDepth = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommittedDepth |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) diff --git a/pkg/status/internal/pb/status.proto b/pkg/status/internal/pb/status.proto index 1cd76212b79..7885139fd9c 100644 --- a/pkg/status/internal/pb/status.proto +++ b/pkg/status/internal/pb/status.proto @@ -25,4 +25,5 @@ message Snapshot { bool IsReachable = 8; uint64 ReserveSizeWithinRadius = 9; uint64 LastSyncedBlock = 10; + uint32 CommittedDepth = 11; } diff --git a/pkg/status/status.go b/pkg/status/status.go index d8106b4e4fb..38cccc7bea1 100644 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -22,7 +22,7 @@ const loggerName = "status" const ( protocolName = "status" - protocolVersion = "1.1.1" + protocolVersion = "1.1.2" streamName = "status" ) @@ -39,6 +39,7 @@ type Reserve interface { ReserveSize() int ReserveSizeWithinRadius() uint64 StorageRadius() uint8 + CommittedDepth() uint8 } type topologyDriver interface { @@ -86,12 +87,14 @@ func (s *Service) LocalSnapshot() (*Snapshot, error) { reserveSizeWithinRadius uint64 connectedPeers uint64 neighborhoodSize uint64 + committedDepth uint8 ) if s.reserve != nil { storageRadius = s.reserve.StorageRadius() reserveSize = uint64(s.reserve.ReserveSize()) reserveSizeWithinRadius = s.reserve.ReserveSizeWithinRadius() + committedDepth = s.reserve.CommittedDepth() } if s.sync != nil { @@ -128,6 +131,7 @@ func (s *Service) LocalSnapshot() (*Snapshot, error) { BatchCommitment: commitment, IsReachable: s.topologyDriver.IsReachable(), LastSyncedBlock: s.chainState.GetChainState().Block, + CommittedDepth: uint32(committedDepth), }, nil } diff --git a/pkg/status/status_test.go b/pkg/status/status_test.go index 019cdae578c..5c60e887959 100644 --- a/pkg/status/status_test.go +++ b/pkg/status/status_test.go @@ -33,6 +33,7 @@ func TestStatus(t *testing.T) { NeighborhoodSize: 1, IsReachable: true, LastSyncedBlock: 6092500, + CommittedDepth: 1, } sssMock := &statusSnapshotMock{want} @@ -203,3 +204,4 @@ func (m *statusSnapshotMock) GetChainState() *postage.ChainState { func (m *statusSnapshotMock) ReserveSizeWithinRadius() uint64 { return m.Snapshot.ReserveSizeWithinRadius } +func (m *statusSnapshotMock) CommittedDepth() uint8 { return uint8(m.Snapshot.CommittedDepth) } diff --git a/pkg/storage/migration/index_test.go b/pkg/storage/migration/index_test.go index d32b2ac4caf..ec35859d01f 100644 --- a/pkg/storage/migration/index_test.go +++ b/pkg/storage/migration/index_test.go @@ -184,7 +184,6 @@ func TestStepIndex_BatchSize(t *testing.T) { const populateItemsCount = 128 for i := 1; i <= 2*populateItemsCount; i <<= 1 { - i := i t.Run(fmt.Sprintf("callback called once per item with batch size: %d", i), func(t *testing.T) { t.Parallel() @@ -362,5 +361,4 @@ func assertItemsInRange(t *testing.T, s storage.Store, from, to int) { if err != nil { t.Fatalf("populate store should succeed: %v", err) } - } diff --git a/pkg/storage/migration/migration_test.go b/pkg/storage/migration/migration_test.go index 4201ba9ef9e..20197955c7a 100644 --- a/pkg/storage/migration/migration_test.go +++ b/pkg/storage/migration/migration_test.go @@ -19,9 +19,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/storage/storageutil" ) -var ( - errStep = errors.New("step error") -) +var errStep = errors.New("step error") func TestLatestVersion(t *testing.T) { t.Parallel() @@ -159,7 +157,6 @@ func 
TestValidateVersions(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() if err := migration.ValidateVersions(tt.input); (err != nil) != tt.wantErr { @@ -329,10 +326,10 @@ func TestTagIDAddressItem_MarshalAndUnmarshal(t *testing.T) { Item: &migration.StorageVersionItem{Version: rand.Uint64()}, Factory: func() storage.Item { return new(migration.StorageVersionItem) }, }, - }} + }, + } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() storagetest.TestItemMarshalAndUnmarshal(t, tc.test) diff --git a/pkg/storage/storagetest/benchmark.go b/pkg/storage/storagetest/benchmark.go index dd762bb2579..ea01d58121c 100644 --- a/pkg/storage/storagetest/benchmark.go +++ b/pkg/storage/storagetest/benchmark.go @@ -65,8 +65,8 @@ func (g *randomValueGenerator) Value(i int) []byte { func makeRandomValueGenerator(r *rand.Rand, ratio float64, valueSize int) randomValueGenerator { b := compressibleBytes(r, ratio, valueSize) - max := maxInt(valueSize, 1024*1024) - for len(b) < max { + maxVal := maxInt(valueSize, 1024*1024) + for len(b) < maxVal { b = append(b, compressibleBytes(r, ratio, valueSize)...) } return randomValueGenerator{b: b, k: valueSize} @@ -352,8 +352,8 @@ type batchDBWriter struct { count int } -func (w *batchDBWriter) commit(max int) { - if w.count >= max { +func (w *batchDBWriter) commit(maxValue int) { + if w.count >= maxValue { _ = w.batch.Commit() w.count = 0 w.batch = w.db.Batch(context.Background()) diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index 63e20b0c4bd..3be9ebb28ea 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -70,7 +70,6 @@ type Agent struct { chainStateGetter postage.ChainStateGetter commitLock sync.Mutex health Health - capacityDoubling uint8 } func New(overlay swarm.Address, @@ -90,7 +89,6 @@ func New(overlay swarm.Address, tranService transaction.Service, health Health, logger log.Logger, - capacityDoubling uint8, ) (*Agent, error) { a := &Agent{ overlay: overlay, @@ -106,7 +104,6 @@ func New(overlay swarm.Address, redistributionStatuser: redistributionStatuser, health: health, chainStateGetter: chainStateGetter, - capacityDoubling: capacityDoubling, } state, err := NewRedistributionState(logger, ethAddress, stateStore, erc20Service, tranService) @@ -394,14 +391,14 @@ func (a *Agent) handleClaim(ctx context.Context, round uint64) error { func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) { // minimum proximity between the achor and the stored chunks - commitedDepth := a.store.StorageRadius() + a.capacityDoubling + committedDepth := a.store.CommittedDepth() if a.state.IsFrozen() { a.logger.Info("skipping round because node is frozen") return false, nil } - isPlaying, err := a.contract.IsPlaying(ctx, commitedDepth) + isPlaying, err := a.contract.IsPlaying(ctx, committedDepth) if err != nil { a.metrics.ErrCheckIsPlaying.Inc() return false, err @@ -434,21 +431,21 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) { } now := time.Now() - sample, err := a.makeSample(ctx, commitedDepth) + sample, err := a.makeSample(ctx, committedDepth) if err != nil { return false, err } dur := time.Since(now) a.metrics.SampleDuration.Set(dur.Seconds()) - a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", commitedDepth, "round", round) + a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", committedDepth, "round", round) 
a.state.SetSampleData(round, sample, dur) return true, nil } -func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData, error) { +func (a *Agent) makeSample(ctx context.Context, committedDepth uint8) (SampleData, error) { salt, err := a.contract.ReserveSalt(ctx) if err != nil { return SampleData{}, err @@ -459,7 +456,7 @@ func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData return SampleData{}, err } - rSample, err := a.store.ReserveSample(ctx, salt, commitedDepth, uint64(timeLimiter), a.minBatchBalance()) + rSample, err := a.store.ReserveSample(ctx, salt, committedDepth, uint64(timeLimiter), a.minBatchBalance()) if err != nil { return SampleData{}, err } @@ -473,7 +470,7 @@ func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData Anchor1: salt, ReserveSampleItems: rSample.Items, ReserveSampleHash: sampleHash, - StorageRadius: commitedDepth, + StorageRadius: committedDepth, } return sample, nil diff --git a/pkg/storageincentives/agent_test.go b/pkg/storageincentives/agent_test.go index 0ae0eda22f9..8af6b1d463e 100644 --- a/pkg/storageincentives/agent_test.go +++ b/pkg/storageincentives/agent_test.go @@ -42,58 +42,58 @@ func TestAgent(t *testing.T) { expectedCalls bool balance *big.Int doubling uint8 - }{{ - name: "3 blocks per phase, same block number returns twice", - blocksPerRound: 9, - blocksPerPhase: 3, - incrementBy: 1, - expectedCalls: true, - limit: 108, // computed with blocksPerRound * (exptectedCalls + 2) - balance: bigBalance, - doubling: 1, - }, { - name: "3 blocks per phase, block number returns every block", - blocksPerRound: 9, - blocksPerPhase: 3, - incrementBy: 1, - expectedCalls: true, - limit: 108, - balance: bigBalance, - doubling: 0, - }, { - name: "no expected calls - block number returns late after each phase", - blocksPerRound: 9, - blocksPerPhase: 3, - incrementBy: 6, - expectedCalls: false, - limit: 108, - balance: bigBalance, - doubling: 0, - }, { - name: "4 blocks per phase, block number returns every other block", - blocksPerRound: 12, - blocksPerPhase: 4, - incrementBy: 2, - expectedCalls: true, - limit: 144, - balance: bigBalance, - doubling: 1, - }, { - // This test case is based on previous, but this time agent will not have enough - // balance to participate in the game so no calls are going to be made. 
- name: "no expected calls - insufficient balance", - blocksPerRound: 12, - blocksPerPhase: 4, - incrementBy: 2, - expectedCalls: false, - limit: 144, - balance: big.NewInt(0), - doubling: 1, - }, + }{ + { + name: "3 blocks per phase, same block number returns twice", + blocksPerRound: 9, + blocksPerPhase: 3, + incrementBy: 1, + expectedCalls: true, + limit: 108, // computed with blocksPerRound * (exptectedCalls + 2) + balance: bigBalance, + doubling: 1, + }, { + name: "3 blocks per phase, block number returns every block", + blocksPerRound: 9, + blocksPerPhase: 3, + incrementBy: 1, + expectedCalls: true, + limit: 108, + balance: bigBalance, + doubling: 0, + }, { + name: "no expected calls - block number returns late after each phase", + blocksPerRound: 9, + blocksPerPhase: 3, + incrementBy: 6, + expectedCalls: false, + limit: 108, + balance: bigBalance, + doubling: 0, + }, { + name: "4 blocks per phase, block number returns every other block", + blocksPerRound: 12, + blocksPerPhase: 4, + incrementBy: 2, + expectedCalls: true, + limit: 144, + balance: bigBalance, + doubling: 1, + }, { + // This test case is based on previous, but this time agent will not have enough + // balance to participate in the game so no calls are going to be made. + name: "no expected calls - insufficient balance", + blocksPerRound: 12, + blocksPerPhase: 4, + incrementBy: 2, + expectedCalls: false, + limit: 144, + balance: big.NewInt(0), + doubling: 1, + }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -182,6 +182,7 @@ func createService( reserve := resMock.NewReserve( resMock.WithRadius(radius), resMock.WithSample(storer.RandSample(t, nil)), + resMock.WithCapacityDoubling(int(doubling)), ) return storageincentives.New( @@ -201,7 +202,6 @@ func createService( transactionmock.New(), &mockHealth{}, log.Noop, - doubling, ) } diff --git a/pkg/storageincentives/soc_mine_test.go b/pkg/storageincentives/soc_mine_test.go index aeec8738676..29a8b5e0898 100644 --- a/pkg/storageincentives/soc_mine_test.go +++ b/pkg/storageincentives/soc_mine_test.go @@ -125,7 +125,6 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd count := 8 // number of parallel workers wg := sync.WaitGroup{} for i := 0; i < count; i++ { - i := i wg.Add(1) eg.Go(func() (err error) { offset := i * 4 diff --git a/pkg/storageincentives/staking/contract.go b/pkg/storageincentives/staking/contract.go index 1ff87c6e2a4..882721fd797 100644 --- a/pkg/storageincentives/staking/contract.go +++ b/pkg/storageincentives/staking/contract.go @@ -253,7 +253,7 @@ func (c *contract) sendApproveTransaction(ctx context.Context, amount *big.Int) To: &c.bzzTokenAddress, Data: callData, GasPrice: sctx.GetGasPrice(ctx), - GasLimit: 65000, + GasLimit: max(sctx.GetGasLimit(ctx), c.gasLimit), Value: big.NewInt(0), Description: approveDescription, } diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go index 2bd951e96de..79536960d94 100644 --- a/pkg/storer/internal/cache/cache_test.go +++ b/pkg/storer/internal/cache/cache_test.go @@ -69,8 +69,6 @@ func TestCacheEntryItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() diff --git a/pkg/storer/internal/chunkstamp/chunkstamp_test.go b/pkg/storer/internal/chunkstamp/chunkstamp_test.go index 49a1ef7d5ff..1167a56f10a 100644 --- a/pkg/storer/internal/chunkstamp/chunkstamp_test.go +++ 
b/pkg/storer/internal/chunkstamp/chunkstamp_test.go @@ -116,8 +116,6 @@ func TestChunkStampItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -225,7 +223,6 @@ func TestStoreLoadDelete(t *testing.T) { }) t.Run("delete all stored stamp index", func(t *testing.T) { - if err := ts.Run(context.Background(), func(s transaction.Store) error { return chunkstamp.Store(s.IndexStore(), ns, chunk) }); err != nil { diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go index d426adbe1e6..9e30c1af876 100644 --- a/pkg/storer/internal/chunkstore/chunkstore_test.go +++ b/pkg/storer/internal/chunkstore/chunkstore_test.go @@ -84,8 +84,6 @@ func TestRetrievalIndexItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -108,7 +106,7 @@ type memFS struct { } func (m *memFS) Open(path string) (fs.File, error) { - return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) + return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644) } func TestChunkStore(t *testing.T) { diff --git a/pkg/storer/internal/pinning/pinning_test.go b/pkg/storer/internal/pinning/pinning_test.go index ace7929997f..21b2b5b5600 100644 --- a/pkg/storer/internal/pinning/pinning_test.go +++ b/pkg/storer/internal/pinning/pinning_test.go @@ -34,7 +34,6 @@ func newTestStorage(t *testing.T) transaction.Storage { } func TestPinStore(t *testing.T) { - tests := make([]pinningCollection, 0, 3) for _, tc := range []struct { @@ -69,7 +68,6 @@ func TestPinStore(t *testing.T) { t.Run("create new collections", func(t *testing.T) { for tCount, tc := range tests { t.Run(fmt.Sprintf("create collection %d", tCount), func(t *testing.T) { - var putter internal.PutterCloserWithReference var err error err = st.Run(context.Background(), func(s transaction.Store) error { @@ -519,8 +517,6 @@ func TestPinCollectionItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() diff --git a/pkg/storer/internal/reserve/items_test.go b/pkg/storer/internal/reserve/items_test.go index 0d82c926d62..19c22224d3a 100644 --- a/pkg/storer/internal/reserve/items_test.go +++ b/pkg/storer/internal/reserve/items_test.go @@ -130,8 +130,6 @@ func TestReserveItems(t *testing.T) { } for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index cc48dc38197..853454034d8 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -50,7 +50,6 @@ func New( radiusSetter topology.SetStorageRadiuser, logger log.Logger, ) (*Reserve, error) { - rs := &Reserve{ baseAddr: baseAddr, st: st, @@ -130,7 +129,6 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { var shouldIncReserveSize bool err = r.st.Run(ctx, func(s transaction.Store) error { - oldStampIndex, loadedStampIndex, err := stampindex.LoadOrStore(s.IndexStore(), reserveScope, chunk) if err != nil { return fmt.Errorf("load or store stamp index for chunk %v has fail: %w", chunk, err) @@ -327,7 +325,6 @@ func (r *Reserve) EvictBatchBin( count int, bin uint8, ) (int, error) { - r.multx.Lock(string(batchID)) defer r.multx.Unlock(string(batchID)) @@ -408,7 +405,6 @@ func RemoveChunkWithItem( 
trx transaction.Store, item *BatchRadiusItem, ) error { - var errs error stamp, _ := chunkstamp.LoadWithBatchID(trx.IndexStore(), reserveScope, item.Address, item.BatchID) @@ -495,7 +491,6 @@ func (r *Reserve) IterateChunksItems(startBin uint8, cb func(*ChunkBinItem) (boo // Reset removes all the entires in the reserve. Must be done before any calls to the reserve. func (r *Reserve) Reset(ctx context.Context) error { - size := r.Size() // step 1: delete epoch timestamp @@ -519,7 +514,6 @@ func (r *Reserve) Reset(ctx context.Context) error { return err } for _, item := range bRitems { - item := item eg.Go(func() error { return r.st.Run(ctx, func(s transaction.Store) error { return errors.Join( @@ -549,7 +543,6 @@ func (r *Reserve) Reset(ctx context.Context) error { return err } for _, item := range sitems { - item := item eg.Go(func() error { return r.st.Run(ctx, func(s transaction.Store) error { return errors.Join( diff --git a/pkg/storer/internal/stampindex/stampindex_test.go b/pkg/storer/internal/stampindex/stampindex_test.go index a04ff2fc50c..98d8090c28f 100644 --- a/pkg/storer/internal/stampindex/stampindex_test.go +++ b/pkg/storer/internal/stampindex/stampindex_test.go @@ -87,8 +87,6 @@ func TestStampIndexItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -116,10 +114,8 @@ func TestStoreLoadDeleteWithStamp(t *testing.T) { ns := fmt.Sprintf("namespace_%d", i) t.Run(ns, func(t *testing.T) { t.Run("store new stamp index", func(t *testing.T) { - err := ts.Run(context.Background(), func(s transaction.Store) error { return stampindex.Store(s.IndexStore(), ns, chunk) - }) if err != nil { t.Fatalf("Store(...): unexpected error: %v", err) @@ -164,7 +160,6 @@ func TestStoreLoadDeleteWithStamp(t *testing.T) { }) t.Run("delete stored stamp index", func(t *testing.T) { - err := ts.Run(context.Background(), func(s transaction.Store) error { return stampindex.Delete(s.IndexStore(), ns, chunk.Stamp()) }) diff --git a/pkg/storer/internal/upload/uploadstore_test.go b/pkg/storer/internal/upload/uploadstore_test.go index 4da6d014bb0..0c9bdb04fd1 100644 --- a/pkg/storer/internal/upload/uploadstore_test.go +++ b/pkg/storer/internal/upload/uploadstore_test.go @@ -118,8 +118,6 @@ func TestPushItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -192,8 +190,6 @@ func TestTagItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -307,8 +303,6 @@ func TestUploadItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -360,8 +354,6 @@ func TestItemNextTagID(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -410,8 +402,6 @@ func TestItemDirtyTagItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -620,7 +610,6 @@ func TestChunkPutter(t *testing.T) { }) t.Run("restart putter", func(t *testing.T) { - var putter internal.PutterCloserWithReference err = ts.Run(context.Background(), func(s transaction.Store) error { @@ -692,7 +681,6 @@ func TestChunkReporter(t *testing.T) { for idx, chunk := range 
chunktest.GenerateTestRandomChunks(10) { t.Run(fmt.Sprintf("chunk %s", chunk.Address()), func(t *testing.T) { - if err := ts.Run(context.Background(), func(s transaction.Store) error { return putter.Put(context.Background(), s, chunk) }); err != nil { diff --git a/pkg/storer/mock/mockreserve.go b/pkg/storer/mock/mockreserve.go index 897403fe4ce..ba8f13590ef 100644 --- a/pkg/storer/mock/mockreserve.go +++ b/pkg/storer/mock/mockreserve.go @@ -33,7 +33,6 @@ func WithSubscribeResp(chunks []*storer.BinC, err error) Option { func WithChunks(chs ...swarm.Chunk) Option { return optionFunc(func(p *ReserveStore) { for _, c := range chs { - c := c if c.Stamp() != nil { stampHash, _ := c.Stamp().Hash() p.chunks[c.Address().String()+string(c.Stamp().BatchID())+string(stampHash)] = c @@ -141,11 +140,17 @@ func (s *ReserveStore) StorageRadius() uint8 { defer s.mtx.Unlock() return s.radius } + func (s *ReserveStore) SetStorageRadius(r uint8) { s.mtx.Lock() s.radius = r s.mtx.Unlock() } +func (s *ReserveStore) CommittedDepth() uint8 { + s.mtx.Lock() + defer s.mtx.Unlock() + return s.radius + uint8(s.capacityDoubling) +} // IntervalChunks returns a set of chunk in a requested interval. func (s *ReserveStore) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *storer.BinC, func(), <-chan error) { @@ -199,7 +204,7 @@ func (s *ReserveStore) SetCalls() int { // Get chunks. func (s *ReserveStore) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (swarm.Chunk, error) { if s.evilAddr.Equal(addr) { - //inject the malicious chunk instead + // inject the malicious chunk instead return s.evilChunk, nil } @@ -227,7 +232,6 @@ func (s *ReserveStore) put(_ context.Context, chs ...swarm.Chunk) error { s.mtx.Lock() defer s.mtx.Unlock() for _, c := range chs { - c := c if s.putHook != nil { if err := s.putHook(c); err != nil { return err diff --git a/pkg/storer/mock/mockstorer.go b/pkg/storer/mock/mockstorer.go index 69e8630d846..6ab457ab759 100644 --- a/pkg/storer/mock/mockstorer.go +++ b/pkg/storer/mock/mockstorer.go @@ -220,6 +220,8 @@ func (m *mockStorer) ChunkStore() storage.ReadOnlyChunkStore { func (m *mockStorer) StorageRadius() uint8 { return 0 } +func (m *mockStorer) CommittedDepth() uint8 { return 0 } + func (m *mockStorer) IsWithinStorageRadius(_ swarm.Address) bool { return true } func (m *mockStorer) DebugInfo(_ context.Context) (storer.Info, error) { diff --git a/pkg/storer/mock/mockstorer_test.go b/pkg/storer/mock/mockstorer_test.go index 6fbee300c24..484dac11262 100644 --- a/pkg/storer/mock/mockstorer_test.go +++ b/pkg/storer/mock/mockstorer_test.go @@ -83,7 +83,7 @@ func TestMockStorer(t *testing.T) { want := storage.ErrNotFound _, have := mockStorer.Session(1) - if !errors.Is(want, have) { + if !errors.Is(have, want) { t.Fatalf("Session(): unexpected error: want %v have %v", want, have) } }) diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go index 41d76e66d1a..3ae3e6df99d 100644 --- a/pkg/storer/reserve.go +++ b/pkg/storer/reserve.go @@ -416,6 +416,14 @@ func (db *DB) StorageRadius() uint8 { return db.reserve.Radius() } +func (db *DB) CommittedDepth() uint8 { + if db.reserve == nil { + return 0 + } + + return uint8(db.reserveOptions.capacityDoubling) + db.reserve.Radius() +} + func (db *DB) ReserveSize() int { if db.reserve == nil { return 0 @@ -500,23 +508,26 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan type NeighborhoodStat struct { Neighborhood swarm.Neighborhood ReserveSizeWithinRadius int + Proximity 
uint8 } func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error) { radius := db.StorageRadius() - - networkRadius := radius + uint8(db.reserveOptions.capacityDoubling) + committedDepth := db.CommittedDepth() prefixes := neighborhoodPrefixes(db.baseAddr, int(radius), db.reserveOptions.capacityDoubling) neighs := make([]*NeighborhoodStat, len(prefixes)) for i, n := range prefixes { - neighs[i] = &NeighborhoodStat{swarm.NewNeighborhood(n, networkRadius), 0} + neighs[i] = &NeighborhoodStat{ + Neighborhood: swarm.NewNeighborhood(n, committedDepth), + ReserveSizeWithinRadius: 0, + Proximity: min(committedDepth, swarm.Proximity(n.Bytes(), db.baseAddr.Bytes()))} } err := db.reserve.IterateChunksItems(0, func(ch *reserve.ChunkBinItem) (bool, error) { for _, n := range neighs { - if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= networkRadius { + if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= committedDepth { n.ReserveSizeWithinRadius++ break } diff --git a/pkg/storer/reserve_test.go b/pkg/storer/reserve_test.go index 564fec4ca6c..79b38ac2ecd 100644 --- a/pkg/storer/reserve_test.go +++ b/pkg/storer/reserve_test.go @@ -670,11 +670,11 @@ func TestNeighborhoodStats(t *testing.T) { t.Parallel() const ( - chunkCountPerPO = 16 - maxPO = 5 - networkRadius uint8 = 4 - doublingFactor uint8 = 2 - localRadius uint8 = networkRadius - doublingFactor + chunkCountPerPO = 16 + maxPO = 5 + committedDepth uint8 = 4 + doublingFactor uint8 = 2 + responsibiliyDepth uint8 = committedDepth - doublingFactor ) mustParse := func(s string) swarm.Address { @@ -706,10 +706,10 @@ func TestNeighborhoodStats(t *testing.T) { testF := func(t *testing.T, st *storer.DB) { t.Helper() - putChunks(baseAddr, int(networkRadius), st) - putChunks(sister1, int(networkRadius), st) - putChunks(sister2, int(networkRadius), st) - putChunks(sister3, int(networkRadius), st) + putChunks(baseAddr, int(committedDepth), st) + putChunks(sister1, int(committedDepth), st) + putChunks(sister2, int(committedDepth), st) + putChunks(sister3, int(committedDepth), st) neighs, err := st.NeighborhoodsStat(context.Background()) if err != nil { @@ -726,12 +726,19 @@ func TestNeighborhoodStats(t *testing.T) { } } - if !neighs[0].Neighborhood.Equal(swarm.NewNeighborhood(baseAddr, networkRadius)) || - !neighs[1].Neighborhood.Equal(swarm.NewNeighborhood(sister1, networkRadius)) || - !neighs[2].Neighborhood.Equal(swarm.NewNeighborhood(sister2, networkRadius)) || - !neighs[3].Neighborhood.Equal(swarm.NewNeighborhood(sister3, networkRadius)) { + if !neighs[0].Neighborhood.Equal(swarm.NewNeighborhood(baseAddr, committedDepth)) || + !neighs[1].Neighborhood.Equal(swarm.NewNeighborhood(sister1, committedDepth)) || + !neighs[2].Neighborhood.Equal(swarm.NewNeighborhood(sister2, committedDepth)) || + !neighs[3].Neighborhood.Equal(swarm.NewNeighborhood(sister3, committedDepth)) { t.Fatal("chunk addresses do not match") } + + if neighs[0].Proximity != committedDepth || + neighs[1].Proximity != 3 || + neighs[2].Proximity != 2 || + neighs[3].Proximity != 2 { + t.Fatalf("wrong proximity") + } } t.Run("disk", func(t *testing.T) { @@ -742,8 +749,8 @@ func TestNeighborhoodStats(t *testing.T) { if err != nil { t.Fatal(err) } - storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(localRadius)) - err = spinlock.Wait(time.Minute, func() bool { return storer.StorageRadius() == localRadius }) + storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), 
networkRadiusFunc(responsibiliyDepth)) + err = spinlock.Wait(time.Minute, func() bool { return storer.StorageRadius() == responsibiliyDepth }) if err != nil { t.Fatal(err) } @@ -757,8 +764,8 @@ func TestNeighborhoodStats(t *testing.T) { if err != nil { t.Fatal(err) } - storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(localRadius)) - err = spinlock.Wait(time.Minute, func() bool { return storer.StorageRadius() == localRadius }) + storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(responsibiliyDepth)) + err = spinlock.Wait(time.Minute, func() bool { return storer.StorageRadius() == responsibiliyDepth }) if err != nil { t.Fatal(err) } diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index 409d4b31094..9ecd97423df 100644 --- a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -11,6 +11,7 @@ import ( "fmt" "hash" "math/big" + "runtime" "sort" "sync" "testing" @@ -41,67 +42,6 @@ type Sample struct { Items []SampleItem } -// RandSample returns Sample with random values. -func RandSample(t *testing.T, anchor []byte) Sample { - t.Helper() - - chunks := make([]swarm.Chunk, SampleSize) - for i := 0; i < SampleSize; i++ { - ch := chunk.GenerateTestRandomChunk() - if i%3 == 0 { - ch = chunk.GenerateTestRandomSoChunk(t, ch) - } - chunks[i] = ch - } - - sample, err := MakeSampleUsingChunks(chunks, anchor) - if err != nil { - t.Fatal(err) - } - - return sample -} - -// MakeSampleUsingChunks returns Sample constructed using supplied chunks. -func MakeSampleUsingChunks(chunks []swarm.Chunk, anchor []byte) (Sample, error) { - prefixHasherFactory := func() hash.Hash { - return swarm.NewPrefixHasher(anchor) - } - items := make([]SampleItem, len(chunks)) - for i, ch := range chunks { - tr, err := transformedAddress(bmt.NewHasher(prefixHasherFactory), ch, getChunkType(ch)) - if err != nil { - return Sample{}, err - } - - items[i] = SampleItem{ - TransformedAddress: tr, - ChunkAddress: ch.Address(), - ChunkData: ch.Data(), - Stamp: newStamp(ch.Stamp()), - } - } - - sort.Slice(items, func(i, j int) bool { - return items[i].TransformedAddress.Compare(items[j].TransformedAddress) == -1 - }) - - return Sample{Items: items}, nil -} - -func newStamp(s swarm.Stamp) *postage.Stamp { - return postage.NewStamp(s.BatchID(), s.Index(), s.Timestamp(), s.Sig()) -} - -func getChunkType(chunk swarm.Chunk) swarm.ChunkType { - if cac.Valid(chunk) { - return swarm.ChunkTypeContentAddressed - } else if soc.Valid(chunk) { - return swarm.ChunkTypeSingleOwner - } - return swarm.ChunkTypeUnspecified -} - // ReserveSample generates the sample of reserve storage of a node required for the // storage incentives agent to participate in the lottery round. 
In order to generate // this sample we need to iterate through all the chunks in the node's reserve and @@ -121,12 +61,13 @@ func getChunkType(chunk swarm.Chunk) swarm.ChunkType { func (db *DB) ReserveSample( ctx context.Context, anchor []byte, - commitedDepth uint8, + committedDepth uint8, consensusTime uint64, minBatchBalance *big.Int, ) (Sample, error) { + g, ctx := errgroup.WithContext(ctx) - chunkC := make(chan *reserve.ChunkBinItem, 64) + allStats := &SampleStats{} statsLock := sync.Mutex{} addStats := func(stats SampleStats) { @@ -144,6 +85,8 @@ func (db *DB) ReserveSample( allStats.BatchesBelowValueDuration = time.Since(t) + chunkC := make(chan *reserve.ChunkBinItem) + // Phase 1: Iterate chunk addresses g.Go(func() error { start := time.Now() @@ -155,7 +98,7 @@ func (db *DB) ReserveSample( }() err := db.reserve.IterateChunksItems(db.StorageRadius(), func(ch *reserve.ChunkBinItem) (bool, error) { - if swarm.Proximity(ch.Address.Bytes(), anchor) < commitedDepth { + if swarm.Proximity(ch.Address.Bytes(), anchor) < committedDepth { return false, nil } select { @@ -170,13 +113,14 @@ func (db *DB) ReserveSample( }) // Phase 2: Get the chunk data and calculate transformed hash - sampleItemChan := make(chan SampleItem, 64) + sampleItemChan := make(chan SampleItem) prefixHasherFactory := func() hash.Hash { return swarm.NewPrefixHasher(anchor) } - const workers = 6 + workers := max(4, runtime.NumCPU()) + db.logger.Debug("reserve sampler workers", "count", workers) for i := 0; i < workers; i++ { g.Go(func() error { @@ -241,6 +185,7 @@ func (db *DB) ReserveSample( }() sampleItems := make([]SampleItem, 0, SampleSize) + // insert function will insert the new item in its correct place. If the sample // size goes beyond what we need we omit the last item. 
insert := func(item SampleItem) { @@ -316,12 +261,12 @@ func (db *DB) ReserveSample( allStats.TotalDuration = time.Since(t) if err := g.Wait(); err != nil { - db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) + db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", committedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err) } - db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) + db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", committedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) return Sample{Stats: *allStats, Items: sampleItems}, nil } @@ -376,20 +321,20 @@ func transformedAddressCAC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address return swarm.NewAddress(taddr), nil } -func transformedAddressSOC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error) { +func transformedAddressSOC(hasher *bmt.Hasher, socChunk swarm.Chunk) (swarm.Address, error) { // Calculate transformed address from wrapped chunk - sChunk, err := soc.FromChunk(chunk) + cacChunk, err := soc.UnwrapCAC(socChunk) if err != nil { return swarm.ZeroAddress, err } - taddrCac, err := transformedAddressCAC(hasher, sChunk.WrappedChunk()) + taddrCac, err := transformedAddressCAC(hasher, cacChunk) if err != nil { return swarm.ZeroAddress, err } // Hash address and transformed address to make transformed address for this SOC sHasher := swarm.NewHasher() - if _, err := sHasher.Write(chunk.Address().Bytes()); err != nil { + if _, err := sHasher.Write(socChunk.Address().Bytes()); err != nil { return swarm.ZeroAddress, err } if _, err := sHasher.Write(taddrCac.Bytes()); err != nil { @@ -432,3 +377,64 @@ func (s *SampleStats) add(other SampleStats) { s.ChunkLoadFailed += other.ChunkLoadFailed s.StampLoadFailed += other.StampLoadFailed } + +// RandSample returns Sample with random values. +func RandSample(t *testing.T, anchor []byte) Sample { + t.Helper() + + chunks := make([]swarm.Chunk, SampleSize) + for i := 0; i < SampleSize; i++ { + ch := chunk.GenerateTestRandomChunk() + if i%3 == 0 { + ch = chunk.GenerateTestRandomSoChunk(t, ch) + } + chunks[i] = ch + } + + sample, err := MakeSampleUsingChunks(chunks, anchor) + if err != nil { + t.Fatal(err) + } + + return sample +} + +// MakeSampleUsingChunks returns Sample constructed using supplied chunks. 
+func MakeSampleUsingChunks(chunks []swarm.Chunk, anchor []byte) (Sample, error) { + prefixHasherFactory := func() hash.Hash { + return swarm.NewPrefixHasher(anchor) + } + items := make([]SampleItem, len(chunks)) + for i, ch := range chunks { + tr, err := transformedAddress(bmt.NewHasher(prefixHasherFactory), ch, getChunkType(ch)) + if err != nil { + return Sample{}, err + } + + items[i] = SampleItem{ + TransformedAddress: tr, + ChunkAddress: ch.Address(), + ChunkData: ch.Data(), + Stamp: newStamp(ch.Stamp()), + } + } + + sort.Slice(items, func(i, j int) bool { + return items[i].TransformedAddress.Compare(items[j].TransformedAddress) == -1 + }) + + return Sample{Items: items}, nil +} + +func newStamp(s swarm.Stamp) *postage.Stamp { + return postage.NewStamp(s.BatchID(), s.Index(), s.Timestamp(), s.Sig()) +} + +func getChunkType(chunk swarm.Chunk) swarm.ChunkType { + if cac.Valid(chunk) { + return swarm.ChunkTypeContentAddressed + } else if soc.Valid(chunk) { + return swarm.ChunkTypeSingleOwner + } + return swarm.ChunkTypeUnspecified +} diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go index 2094596976f..2628807a24e 100644 --- a/pkg/storer/storer.go +++ b/pkg/storer/storer.go @@ -163,6 +163,7 @@ type ReserveStore interface { type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 + CommittedDepth() uint8 } // LocalStore is a read-only ChunkStore. It can be used to check if chunk is known diff --git a/pkg/storer/uploadstore_test.go b/pkg/storer/uploadstore_test.go index 532d6ce8ef0..095904ade7d 100644 --- a/pkg/storer/uploadstore_test.go +++ b/pkg/storer/uploadstore_test.go @@ -89,7 +89,6 @@ func testUploadStore(t *testing.T, newStorer func() (*storer.DB, error)) { duplicate: true, }, } { - tc := tc testName := fmt.Sprintf("upload_%d_chunks", len(tc.chunks)) if tc.pin { testName += "_with_pin" @@ -436,7 +435,6 @@ func TestReporter(t *testing.T) { t.Parallel() testReporter(t, func() (*storer.DB, error) { - opts := dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second) return storer.New(context.Background(), "", opts) diff --git a/pkg/swarm/swarm_test.go b/pkg/swarm/swarm_test.go index 99bc243da0e..635392fcb62 100644 --- a/pkg/swarm/swarm_test.go +++ b/pkg/swarm/swarm_test.go @@ -54,7 +54,6 @@ func TestAddress(t *testing.T) { want: swarm.NewAddress([]byte{0x35, 0xa2, 0x6b, 0x7b, 0xb6, 0x45, 0x5c, 0xba, 0xbe, 0x7a, 0xe, 0x5, 0xaa, 0xfb, 0xd0, 0xb8, 0xb2, 0x6f, 0xea, 0xc8, 0x43, 0xe3, 0xb9, 0xa6, 0x49, 0x46, 0x8d, 0xe, 0xa3, 0x7a, 0x12, 0xb2}), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -184,7 +183,6 @@ func TestParseBitStr(t *testing.T) { "011100000", }, } { - if addr, err := swarm.ParseBitStrAddress(tc.bitStr); err != nil { t.Fatal(err) } else if got := swarm.Proximity(addr.Bytes(), tc.overlay.Bytes()); got < uint8(len(tc.bitStr)) { diff --git a/pkg/topology/kademlia/mock/kademlia.go b/pkg/topology/kademlia/mock/kademlia.go index cd115bf7186..3fd9362209e 100644 --- a/pkg/topology/kademlia/mock/kademlia.go +++ b/pkg/topology/kademlia/mock/kademlia.go @@ -21,10 +21,7 @@ type AddrTuple struct { func WithEachPeerRevCalls(addrs ...AddrTuple) Option { return optionFunc(func(m *Mock) { - for _, a := range addrs { - a := a - m.eachPeerRev = append(m.eachPeerRev, a) - } + m.eachPeerRev = append(m.eachPeerRev, addrs...) 
}) } @@ -90,10 +87,7 @@ func (m *Mock) SetStorageRadius(uint8) { func (m *Mock) AddRevPeers(addrs ...AddrTuple) { m.mtx.Lock() defer m.mtx.Unlock() - for _, a := range addrs { - a := a - m.eachPeerRev = append(m.eachPeerRev, a) - } + m.eachPeerRev = append(m.eachPeerRev, addrs...) } // EachConnectedPeer iterates from closest bin to farthest diff --git a/pkg/topology/pslice/pslice_test.go b/pkg/topology/pslice/pslice_test.go index f5cff17f7ae..f0f3ea250ab 100644 --- a/pkg/topology/pslice/pslice_test.go +++ b/pkg/topology/pslice/pslice_test.go @@ -92,7 +92,7 @@ func TestNoPanicOnEmptyRemove(t *testing.T) { t.Parallel() base := swarm.RandAddress(t) - var ps = pslice.New(4, base) + ps := pslice.New(4, base) addr1 := swarm.RandAddressAt(t, base, 2) addr2 := swarm.RandAddressAt(t, base, 2) @@ -276,7 +276,6 @@ func TestBinPeers(t *testing.T) { label: "full-bins", }, } { - tc := tc t.Run(tc.label, func(t *testing.T) { t.Parallel() @@ -314,7 +313,6 @@ func TestBinPeers(t *testing.T) { } func isEqual(a, b []swarm.Address) bool { - if len(a) != len(b) { return false } @@ -359,7 +357,6 @@ func TestIteratorsJumpStop(t *testing.T) { // // check that the stop functionality works correctly testIterator(t, ps, true, true, 1, []swarm.Address{peers[9]}) testIteratorRev(t, ps, true, true, 1, []swarm.Address{peers[0]}) - } func testIteratorRev(t *testing.T, ps *pslice.PSlice, skipNext, stop bool, iterations int, peerseq []swarm.Address) { diff --git a/pkg/traversal/traversal_test.go b/pkg/traversal/traversal_test.go index c3524ebf81b..b2de3f2ae72 100644 --- a/pkg/traversal/traversal_test.go +++ b/pkg/traversal/traversal_test.go @@ -147,7 +147,6 @@ func TestTraversalBytes(t *testing.T) { } for _, tc := range testCases { - tc := tc chunkCount := int(math.Ceil(float64(tc.dataSize) / swarm.ChunkSize)) t.Run(fmt.Sprintf("%d-chunk-%d-bytes", chunkCount, tc.dataSize), func(t *testing.T) { t.Parallel() @@ -242,7 +241,6 @@ func TestTraversalFiles(t *testing.T) { } for _, tc := range testCases { - tc := tc chunkCount := int(math.Ceil(float64(tc.filesSize) / swarm.ChunkSize)) t.Run(fmt.Sprintf("%d-chunk-%d-bytes", chunkCount, tc.filesSize), func(t *testing.T) { t.Parallel() @@ -403,7 +401,6 @@ func TestTraversalManifest(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("%s-%d-files-%d-chunks", defaultMediaType, len(tc.files), tc.wantHashCount), func(t *testing.T) { t.Parallel()