diff --git a/.github/workflows/beekeeper.yml b/.github/workflows/beekeeper.yml
index 36c7c7fd069..e8690080146 100644
--- a/.github/workflows/beekeeper.yml
+++ b/.github/workflows/beekeeper.yml
@@ -8,7 +8,7 @@ on:
       - "**"
 
 env:
-  K3S_VERSION: "v1.22.17+k3s1"
+  K3S_VERSION: "v1.30.3+k3s1"
   REPLICA: 3
   RUN_TYPE: "PR RUN"
   SETUP_CONTRACT_IMAGE: "ethersphere/bee-localchain"
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 371976f1cb0..128e0bae8c5 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -67,10 +67,10 @@ jobs:
         if: github.ref != 'refs/heads/master'
         uses: wagoid/commitlint-github-action@v5
       - name: GolangCI-Lint
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         with:
           skip-cache: false
-          version: v1.54.1
+          version: v1.61.0
       - name: Whitespace check
         run: make check-whitespace
       - name: go mod tidy check
diff --git a/.golangci.yml b/.golangci.yml
index def3e2a3d8d..4e9d42a0380 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -4,13 +4,12 @@ linters:
   enable:
     - asciicheck
     - bidichk
-    # - depguard disable temporary until this issue is resolved: https://github.com/golangci/golangci-lint/issues/3906
+    - copyloopvar
    - dogsled
     - durationcheck
     - errcheck
     - errname
     - errorlint
-    - exportloopref
     - forbidigo
     - gochecknoinits
     - goconst
@@ -33,6 +32,7 @@ linters:
     - typecheck
     - unconvert
     - unused
+    # - depguard disable temporary until this issue is resolved: https://github.com/golangci/golangci-lint/issues/3906
 
 linters-settings:
   govet:
diff --git a/CODINGSTYLE.md b/CODINGSTYLE.md
index 30d6e5a0d5b..6ef48aa3d03 100644
--- a/CODINGSTYLE.md
+++ b/CODINGSTYLE.md
@@ -161,11 +161,10 @@ Use the Golang [testing package](https://pkg.go.dev/testing) from the standard l
 
 ### Parallel Test Execution
 
-Run tests in parallel where possible but don't forget about variable scope gotchas.
+Run tests in parallel where possible.
 
 ```go
 for tc := range tt {
-    tc := tc // must not forget this
     t.Run(tc.name, func(t *testing.T) {
         t.Parallel()
         //execute
diff --git a/Dockerfile b/Dockerfile
index e0f0738de6c..c957601a9ea 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.22 AS build
+FROM golang:1.23 AS build
 
 WORKDIR /src
 # enable modules caching in separate layer
@@ -8,7 +8,7 @@ COPY . ./
 
 RUN make binary
 
-FROM debian:12.4-slim
+FROM debian:12.7-slim
 
 ENV DEBIAN_FRONTEND noninteractive
diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser
index cd1fe90dc0d..943e63eaeee 100644
--- a/Dockerfile.goreleaser
+++ b/Dockerfile.goreleaser
@@ -1,4 +1,4 @@
-FROM debian:12.4-slim
+FROM debian:12.7-slim
 
 ENV DEBIAN_FRONTEND noninteractive
diff --git a/Dockerfile.scratch b/Dockerfile.scratch
index 40b0bec6c94..c0e13abe1e6 100644
--- a/Dockerfile.scratch
+++ b/Dockerfile.scratch
@@ -1,4 +1,4 @@
-FROM debian:12.4-slim
+FROM debian:12.7-slim
 
 ENV DEBIAN_FRONTEND noninteractive
diff --git a/Makefile b/Makefile
index 2c654ea5d7d..90e2a6c781b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 GO ?= go
 GOBIN ?= $$($(GO) env GOPATH)/bin
 GOLANGCI_LINT ?= $(GOBIN)/golangci-lint
-GOLANGCI_LINT_VERSION ?= v1.55.0
+GOLANGCI_LINT_VERSION ?= v1.61.0
 GOGOPROTOBUF ?= protoc-gen-gogofaster
 GOGOPROTOBUF_VERSION ?= v1.3.1
 BEEKEEPER_INSTALL_DIR ?= $(GOBIN)
diff --git a/go.mod b/go.mod
index 906fe3e6486..bda88034b37 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,8 @@
 module github.com/ethersphere/bee/v2
 
-go 1.22
+go 1.23
 
-toolchain go1.22.0
+toolchain go1.23.0
 
 require (
     contrib.go.opencensus.io/exporter/prometheus v0.4.2
diff --git a/pkg/accesscontrol/grantee.go b/pkg/accesscontrol/grantee.go
index 902aebbf433..a7ae1df32a0 100644
--- a/pkg/accesscontrol/grantee.go
+++ b/pkg/accesscontrol/grantee.go
@@ -7,11 +7,11 @@ package accesscontrol
 import (
     "context"
     "crypto/ecdsa"
-    "crypto/elliptic"
     "errors"
     "fmt"
 
     "github.com/btcsuite/btcd/btcec/v2"
+    "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethersphere/bee/v2/pkg/file"
     "github.com/ethersphere/bee/v2/pkg/swarm"
 )
@@ -85,7 +85,10 @@ func (g *GranteeListStruct) Add(addList []*ecdsa.PublicKey) error {
 
 // Save saves the grantee list to the underlying storage and returns the reference.
 func (g *GranteeListStruct) Save(ctx context.Context) (swarm.Address, error) {
-    data := serialize(g.grantees)
+    data, err := serialize(g.grantees)
+    if err != nil {
+        return swarm.ZeroAddress, fmt.Errorf("grantee serialize error: %w", err)
+    }
     refBytes, err := g.loadSave.Save(ctx, data)
     if err != nil {
         return swarm.ZeroAddress, fmt.Errorf("grantee save error: %w", err)
     }
@@ -140,16 +143,16 @@ func NewGranteeListReference(ctx context.Context, ls file.LoadSaver, reference s
     }, nil
 }
 
-func serialize(publicKeys []*ecdsa.PublicKey) []byte {
+func serialize(publicKeys []*ecdsa.PublicKey) ([]byte, error) {
     b := make([]byte, 0, len(publicKeys)*publicKeyLen)
     for _, key := range publicKeys {
-        b = append(b, serializePublicKey(key)...)
+        // TODO: check if this is the correct way to serialize the public key
+        // Is this the only curve we support?
+        // Should we have switch case for different curves?
+        pubBytes := crypto.S256().Marshal(key.X, key.Y)
+        b = append(b, pubBytes...)
     }
-    return b
-}
-
-func serializePublicKey(pub *ecdsa.PublicKey) []byte {
-    return elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+    return b, nil
 }
 
 func deserialize(data []byte) []*ecdsa.PublicKey {
diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go
index 596327783e6..c01d8dcaf2c 100644
--- a/pkg/api/api_test.go
+++ b/pkg/api/api_test.go
@@ -384,7 +384,6 @@ func TestParseName(t *testing.T) {
         s.Mount()
         s.EnableFullAPI()
 
-        tC := tC
         t.Run(tC.desc, func(t *testing.T) {
             t.Parallel()
@@ -453,7 +452,6 @@ func TestPostageHeaderError(t *testing.T) {
     )
     content := []byte{7: 0} // 8 zeros
     for _, endpoint := range endpoints {
-        endpoint := endpoint
         t.Run(endpoint+": empty batch", func(t *testing.T) {
             t.Parallel()
@@ -538,7 +536,6 @@ func TestOptions(t *testing.T) {
             expectedMethods: "GET, HEAD",
         },
     } {
-        tc := tc
         t.Run(tc.endpoint+" options test", func(t *testing.T) {
             t.Parallel()
@@ -555,8 +552,6 @@ func TestPostageDirectAndDeferred(t *testing.T) {
     t.Parallel()
 
     for _, endpoint := range []string{"bytes", "bzz", "chunks"} {
-        endpoint := endpoint
-
         if endpoint != "chunks" {
             t.Run(endpoint+" deferred", func(t *testing.T) {
                 t.Parallel()
diff --git a/pkg/api/balances_test.go b/pkg/api/balances_test.go
index d5434c3d3c8..fc6f6a25b3a 100644
--- a/pkg/api/balances_test.go
+++ b/pkg/api/balances_test.go
@@ -210,7 +210,6 @@ func TestConsumedBalances(t *testing.T) {
     if !equalBalances(got, expected) {
         t.Errorf("got balances: %v, expected: %v", got, expected)
     }
-
 }
 
 func TestConsumedError(t *testing.T) {
@@ -328,7 +327,6 @@ func Test_peerBalanceHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -377,7 +375,6 @@ func Test_compensatedPeerBalanceHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/bytes_test.go b/pkg/api/bytes_test.go
index e2acc99a9d6..f03fa8b973e 100644
--- a/pkg/api/bytes_test.go
+++ b/pkg/api/bytes_test.go
@@ -271,7 +271,6 @@ func TestBytesInvalidStamp(t *testing.T) {
             jsonhttptest.WithRequestBody(bytes.NewReader(content)),
         )
     })
-
 }
 
 func TestBytesUploadHandlerInvalidInputs(t *testing.T) {
@@ -314,7 +313,6 @@ func TestBytesUploadHandlerInvalidInputs(t *testing.T) {
     }
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -364,7 +362,6 @@ func TestBytesGetHandlerInvalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/bzz_test.go b/pkg/api/bzz_test.go
index 7d1e1b27bfe..246ed106778 100644
--- a/pkg/api/bzz_test.go
+++ b/pkg/api/bzz_test.go
@@ -210,10 +210,8 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) {
         })
     }
     for _, rLevel := range []redundancy.Level{1, 2, 3, 4} {
-        rLevel := rLevel
         t.Run(fmt.Sprintf("level=%d", rLevel), func(t *testing.T) {
             for _, encrypt := range []bool{false, true} {
-                encrypt := encrypt
                 shardCnt := rLevel.GetMaxShards()
                 parityCnt := rLevel.GetParities(shardCnt)
                 if encrypt {
@@ -230,7 +228,6 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) {
                 case 3:
                     chunkCnt = shardCnt*shardCnt + 1
                 }
-                levels := levels
                 t.Run(fmt.Sprintf("encrypt=%v levels=%d chunks=%d", encrypt, levels, chunkCnt), func(t *testing.T) {
                     if levels > 2 && (encrypt == (rLevel%2 == 1)) {
                         t.Skip("skipping to save time")
@@ -619,7 +616,6 @@ func TestBzzFilesRangeRequests(t *testing.T) {
     }
 
     for _, upload := range uploads {
-        upload := upload
         t.Run(upload.name, func(t *testing.T) {
             t.Parallel()
@@ -886,7 +882,6 @@ func Test_bzzDownloadHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -934,7 +929,6 @@ func TestInvalidBzzParams(t *testing.T) {
             jsonhttptest.WithRequestBody(tr),
             jsonhttptest.WithRequestHeader(api.ContentTypeHeader, api.ContentTypeTar),
         )
-
     })
 
     t.Run("batch exists", func(t *testing.T) {
@@ -962,7 +956,6 @@ func TestInvalidBzzParams(t *testing.T) {
             jsonhttptest.WithRequestBody(tr),
             jsonhttptest.WithRequestHeader(api.ContentTypeHeader, api.ContentTypeTar),
         )
-
     })
 
     t.Run("batch not found", func(t *testing.T) {
@@ -1057,7 +1050,6 @@ func TestInvalidBzzParams(t *testing.T) {
         address := "f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b"
         jsonhttptest.Request(t, client, http.MethodGet, fmt.Sprintf("/bzz/%s/", address), http.StatusNotFound)
     })
-
 }
 
 // TestDirectUploadBzz tests that the direct upload endpoint give correct error message in dev mode
diff --git a/pkg/api/chequebook_test.go b/pkg/api/chequebook_test.go
index e0276654d45..86e918a1bad 100644
--- a/pkg/api/chequebook_test.go
+++ b/pkg/api/chequebook_test.go
@@ -418,7 +418,6 @@ func TestChequebookLastCheques(t *testing.T) {
     if !LastChequesEqual(got, expected) {
         t.Fatalf("Got: \n %+v \n\n Expected: \n %+v \n\n", got, expected)
     }
-
 }
 
 func TestChequebookLastChequesPeer(t *testing.T) {
@@ -433,7 +432,6 @@ func TestChequebookLastChequesPeer(t *testing.T) {
     sig := make([]byte, 65)
 
     lastSentChequeFunc := func(swarm.Address) (*chequebook.SignedCheque, error) {
-
         sig := make([]byte, 65)
 
         lastSentCheque := &chequebook.SignedCheque{
@@ -449,7 +447,6 @@ func TestChequebookLastChequesPeer(t *testing.T) {
     }
 
     lastReceivedChequeFunc := func(swarm.Address) (*chequebook.SignedCheque, error) {
-
         lastReceivedCheque := &chequebook.SignedCheque{
             Cheque: chequebook.Cheque{
                 Beneficiary: beneficiary0,
@@ -488,7 +485,6 @@ func TestChequebookLastChequesPeer(t *testing.T) {
     if !reflect.DeepEqual(got, expected) {
         t.Fatalf("Got: \n %+v \n\n Expected: \n %+v \n\n", got, expected)
     }
-
 }
 
 func TestChequebookCashout(t *testing.T) {
@@ -753,7 +749,6 @@ func Test_chequebookLastPeerHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -765,7 +760,6 @@ func Test_chequebookLastPeerHandler_invalidInputs(t *testing.T) {
 }
 
 func LastChequesEqual(a, b *api.ChequebookLastChequesResponse) bool {
-
     var state bool
 
     for akeys := range a.LastCheques {
diff --git a/pkg/api/chunk_test.go b/pkg/api/chunk_test.go
index 3bbc0558aa8..2c85fa9d2ea 100644
--- a/pkg/api/chunk_test.go
+++ b/pkg/api/chunk_test.go
@@ -177,7 +177,6 @@ func TestChunkHandlersInvalidInputs(t *testing.T) {
     method := http.MethodGet
 
     for _, tc := range tests {
-        tc := tc
         t.Run(method+" "+tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/cors_test.go b/pkg/api/cors_test.go
index 73c3b343414..9a45fd5fade 100644
--- a/pkg/api/cors_test.go
+++ b/pkg/api/cors_test.go
@@ -79,7 +79,6 @@ func TestCORSHeaders(t *testing.T) {
             wantCORS: false,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -116,7 +115,6 @@ func TestCORSHeaders(t *testing.T) {
             }
         })
     }
-
 }
 
 // TestCors tests whether CORs work correctly with OPTIONS method
@@ -135,7 +133,8 @@ func TestCors(t *testing.T) {
         {
             endpoint: "bzz",
             expectedMethods: "POST",
-        }, {
+        },
+        {
             endpoint: "bzz/0101011",
             expectedMethods: "GET, HEAD",
         },
@@ -156,7 +155,6 @@ func TestCors(t *testing.T) {
             expectedMethods: "GET, HEAD",
         },
     } {
-        tc := tc
         t.Run(tc.endpoint, func(t *testing.T) {
             t.Parallel()
@@ -212,7 +210,6 @@ func TestCorsStatus(t *testing.T) {
             allowedMethods: "GET, HEAD",
         },
     } {
-        tc := tc
         t.Run(tc.endpoint, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/logger_test.go b/pkg/api/logger_test.go
index 1a702164b8a..1e13917535f 100644
--- a/pkg/api/logger_test.go
+++ b/pkg/api/logger_test.go
@@ -178,7 +178,6 @@ func Test_loggerGetHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -244,7 +243,6 @@ func Test_loggerSetVerbosityHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/peer_test.go b/pkg/api/peer_test.go
index fdda7b8edf4..a9386d8d694 100644
--- a/pkg/api/peer_test.go
+++ b/pkg/api/peer_test.go
@@ -240,7 +240,6 @@ func Test_peerConnectHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -289,7 +288,6 @@ func Test_peerDisconnectHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/pin_test.go b/pkg/api/pin_test.go
index a85c34a9594..4af6c20dd10 100644
--- a/pkg/api/pin_test.go
+++ b/pkg/api/pin_test.go
@@ -136,7 +136,6 @@ func TestPinHandlers(t *testing.T) {
         rootHash = strings.Trim(header.Get(api.ETagHeader), "\"")
         checkPinHandlers(t, client, rootHash, false)
     })
-
 }
 
 func TestPinHandlersInvalidInputs(t *testing.T) {
@@ -177,9 +176,7 @@ func TestPinHandlersInvalidInputs(t *testing.T) {
     }}
 
     for _, method := range []string{http.MethodGet, http.MethodPost, http.MethodDelete} {
-        method := method
         for _, tc := range tests {
-            tc := tc
             t.Run(method+" "+tc.name, func(t *testing.T) {
                 t.Parallel()
@@ -194,7 +191,6 @@ func TestPinHandlersInvalidInputs(t *testing.T) {
 const pinRef = "620fcd78c7ce54da2d1b7cc2274a02e190cbe8fecbc3bd244690ab6517ce8f39"
 
 func TestIntegrityHandler(t *testing.T) {
-
     t.Parallel()
 
     t.Run("ok", func(t *testing.T) {
diff --git a/pkg/api/pingpong_test.go b/pkg/api/pingpong_test.go
index 9de99103619..20f8af6a06a 100644
--- a/pkg/api/pingpong_test.go
+++ b/pkg/api/pingpong_test.go
@@ -113,7 +113,6 @@ func Test_pingpongHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/postage_test.go b/pkg/api/postage_test.go
index 13e3dda13d6..6ba9c812a69 100644
--- a/pkg/api/postage_test.go
+++ b/pkg/api/postage_test.go
@@ -366,7 +366,6 @@ func TestPostageGetBuckets(t *testing.T) {
 
         jsonhttptest.Request(t, tsNotFound, http.MethodGet, "/stamps/"+batchOkStr+"/buckets", http.StatusNotFound)
     })
-
 }
 
 func TestReserveState(t *testing.T) {
@@ -397,6 +396,7 @@ func TestReserveState(t *testing.T) {
         )
     })
 }
+
 func TestChainState(t *testing.T) {
     t.Parallel()
 
@@ -423,7 +423,6 @@ func TestChainState(t *testing.T) {
             }),
         )
     })
-
 }
 
 func TestPostageTopUpStamp(t *testing.T) {
@@ -683,7 +682,6 @@ func TestPostageDiluteStamp(t *testing.T) {
                 TxHash: txHash.String(),
             }),
         )
-
     })
 }
 
@@ -770,8 +768,6 @@ func TestPostageAccessHandler(t *testing.T) {
 
     for _, op1 := range success {
         for _, op2 := range failure {
-            op1 := op1
-            op2 := op2
             t.Run(op1.name+"-"+op2.name, func(t *testing.T) {
                 t.Parallel()
@@ -914,7 +910,6 @@ func Test_postageGetStampBucketsHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -976,7 +971,6 @@ func Test_postageGetStampHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/pss_test.go b/pkg/api/pss_test.go
index 6624e6d8d21..d78f8ea668f 100644
--- a/pkg/api/pss_test.go
+++ b/pkg/api/pss_test.go
@@ -88,7 +88,6 @@ func TestPssWebsocketSingleHandlerDeregister(t *testing.T) {
     )
 
     err := cl.SetReadDeadline(time.Now().Add(longTimeout))
-
     if err != nil {
         t.Fatal(err)
     }
@@ -435,7 +434,6 @@ func TestPssPostHandlerInvalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -446,10 +444,12 @@ func TestPssPostHandlerInvalidInputs(t *testing.T) {
     }
 }
 
-type pssSendFn func(context.Context, pss.Targets, swarm.Chunk) error
-type mpss struct {
-    f pssSendFn
-}
+type (
+    pssSendFn func(context.Context, pss.Targets, swarm.Chunk) error
+    mpss      struct {
+        f pssSendFn
+    }
+)
 
 func newMockPss(f pssSendFn) *mpss {
     return &mpss{f}
diff --git a/pkg/api/router_test.go b/pkg/api/router_test.go
index 6f3e89d5eeb..99382427c22 100644
--- a/pkg/api/router_test.go
+++ b/pkg/api/router_test.go
@@ -407,7 +407,6 @@ func TestEndpointOptions(t *testing.T) {
     }
 
     for _, tc := range testCases {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/settlements_test.go b/pkg/api/settlements_test.go
index f76997e1b8c..ae1f73439f8 100644
--- a/pkg/api/settlements_test.go
+++ b/pkg/api/settlements_test.go
@@ -78,7 +78,6 @@ func TestSettlements(t *testing.T) {
     if !equalSettlements(got, expected) {
         t.Errorf("got settlements: %+v, expected: %+v", got, expected)
     }
-
 }
 
 func TestSettlementsError(t *testing.T) {
@@ -208,7 +207,6 @@ func Test_peerSettlementsHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/soc_test.go b/pkg/api/soc_test.go
index 2407689ece1..344204f06c6 100644
--- a/pkg/api/soc_test.go
+++ b/pkg/api/soc_test.go
@@ -90,7 +90,7 @@ func TestSOC(t *testing.T) {
 
         // try to fetch the same chunk
         t.Run("chunks fetch", func(t *testing.T) {
-            rsrc := fmt.Sprintf("/chunks/" + s.Address().String())
+            rsrc := fmt.Sprintf("/chunks/%s", s.Address().String())
             resp := request(t, client, http.MethodGet, rsrc, nil, http.StatusOK)
             data, err := io.ReadAll(resp.Body)
             if err != nil {
@@ -141,7 +141,6 @@ func TestSOC(t *testing.T) {
     })
 
     t.Run("ok batch", func(t *testing.T) {
-
         s := testingsoc.GenerateMockSOC(t, testData)
         hexbatch := hex.EncodeToString(batchOk)
         client, _, _, chanStorer := newTestServer(t, testServerOptions{
diff --git a/pkg/api/staking_test.go b/pkg/api/staking_test.go
index 8c84c0f8bb5..9b55b039d88 100644
--- a/pkg/api/staking_test.go
+++ b/pkg/api/staking_test.go
@@ -190,7 +190,6 @@ func Test_stakingDepositHandler_invalidInputs(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/stewardship_test.go b/pkg/api/stewardship_test.go
index d10d9366126..3f5b16e7c2a 100644
--- a/pkg/api/stewardship_test.go
+++ b/pkg/api/stewardship_test.go
@@ -100,9 +100,7 @@ func TestStewardshipInvalidInputs(t *testing.T) {
     }}
 
     for _, method := range []string{http.MethodGet, http.MethodPut} {
-        method := method
         for _, tc := range tests {
-            tc := tc
             t.Run(method+" "+tc.name, func(t *testing.T) {
                 t.Parallel()
diff --git a/pkg/api/subdomain_test.go b/pkg/api/subdomain_test.go
index 6f8efc0b0f2..c1f7fbed142 100644
--- a/pkg/api/subdomain_test.go
+++ b/pkg/api/subdomain_test.go
@@ -88,7 +88,6 @@ func TestSubdomains(t *testing.T) {
             },
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/api/tag_test.go b/pkg/api/tag_test.go
index d3b8edeb4dc..3a7697ddae8 100644
--- a/pkg/api/tag_test.go
+++ b/pkg/api/tag_test.go
@@ -25,7 +25,6 @@ func tagsWithIdResource(id uint64) string { return fmt.Sprintf("/tags/%d", id) }
 
 // nolint:paralleltest
 func TestTags(t *testing.T) {
-
     var (
         tagsResource = "/tags"
         storerMock   = mockstorer.New()
@@ -222,9 +221,7 @@ func TestTagsHandlersInvalidInputs(t *testing.T) {
     }}
 
     for _, method := range []string{http.MethodGet, http.MethodDelete, http.MethodPatch} {
-        method := method
         for _, tc := range tests {
-            tc := tc
             t.Run(method+" "+tc.name, func(t *testing.T) {
                 t.Parallel()
diff --git a/pkg/api/util_test.go b/pkg/api/util_test.go
index 6aa55fbf891..4628ce31270 100644
--- a/pkg/api/util_test.go
+++ b/pkg/api/util_test.go
@@ -499,8 +499,6 @@ func TestMapStructure(t *testing.T) {
         want: &mapSwarmAddressTest{SwarmAddressVal: swarm.MustParseHexAddress("1234567890abcdef")},
     }}
     for _, tc := range tests {
-        tc := tc
-
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/bmt/benchmark_test.go b/pkg/bmt/benchmark_test.go
index 2f49ce37179..acf0ab4fc83 100644
--- a/pkg/bmt/benchmark_test.go
+++ b/pkg/bmt/benchmark_test.go
@@ -60,7 +60,7 @@ func benchmarkSHA3(b *testing.B, n int) {
 // doing it on n testPoolSize each reusing the base hasher
 // the premise is that this is the minimum computation needed for a BMT
 // therefore this serves as a theoretical optimum for concurrent implementations
-func benchmarkBMTBaseline(b *testing.B, n int) {
+func benchmarkBMTBaseline(b *testing.B, _ int) {
     b.Helper()
 
     testData := testutil.RandBytesWithSeed(b, 4096, seed)
diff --git a/pkg/bmt/bmt.go b/pkg/bmt/bmt.go
index cb19ffb1c39..28d65403bdc 100644
--- a/pkg/bmt/bmt.go
+++ b/pkg/bmt/bmt.go
@@ -119,9 +119,9 @@ func (h *Hasher) Sum(b []byte) []byte {
 // with every full segment calls processSection in a go routine.
 func (h *Hasher) Write(b []byte) (int, error) {
     l := len(b)
-    max := h.maxSize - h.size
-    if l > max {
-        l = max
+    maxVal := h.maxSize - h.size
+    if l > maxVal {
+        l = maxVal
     }
     copy(h.bmt.buffer[h.size:], b)
     secsize := 2 * h.segmentSize
@@ -129,7 +129,7 @@ func (h *Hasher) Write(b []byte) (int, error) {
     h.offset = h.size % secsize
     h.size += l
     to := h.size / secsize
-    if l == max {
+    if l == maxVal {
         to--
     }
     h.pos = to
diff --git a/pkg/bmt/bmt_test.go b/pkg/bmt/bmt_test.go
index 9bb5589eb6a..0eefe9fdb5d 100644
--- a/pkg/bmt/bmt_test.go
+++ b/pkg/bmt/bmt_test.go
@@ -60,7 +60,6 @@ func TestHasherEmptyData(t *testing.T) {
     t.Parallel()
 
     for _, count := range testSegmentCounts {
-        count := count
         t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
             t.Parallel()
@@ -88,14 +87,13 @@ func TestSyncHasherCorrectness(t *testing.T) {
     testData := testutil.RandBytesWithSeed(t, 4096, seed)
 
     for _, count := range testSegmentCounts {
-        count := count
         t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) {
             t.Parallel()
-            max := count * hashSize
+            maxValue := count * hashSize
             var incr int
             capacity := 1
             pool := bmt.NewPool(bmt.NewConf(swarm.NewHasher, count, capacity))
-            for n := 0; n <= max; n += incr {
+            for n := 0; n <= maxValue; n += incr {
                 h := pool.Get()
                 incr = 1 + rand.Intn(5)
                 err := testHasherCorrectness(h, testData, n, count)
@@ -177,8 +175,6 @@ func TestBMTWriterBuffers(t *testing.T) {
     t.Parallel()
 
     for i, count := range testSegmentCounts {
-        i, count := i, count
-
         t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/bmt/proof_test.go b/pkg/bmt/proof_test.go
index d9b4ae19438..ba1c3e7220c 100644
--- a/pkg/bmt/proof_test.go
+++ b/pkg/bmt/proof_test.go
@@ -44,7 +44,6 @@ func TestProofCorrectness(t *testing.T) {
                 t.Fatal("incorrect segment in proof")
             }
         }
-
     }
 
     pool := bmt.NewPool(bmt.NewConf(swarm.NewHasher, 128, 128))
@@ -211,7 +210,6 @@ func TestProof(t *testing.T) {
     }
 
     for i := 0; i < 128; i++ {
-        i := i
         t.Run(fmt.Sprintf("segmentIndex %d", i), func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/bmt/reference/reference_test.go b/pkg/bmt/reference/reference_test.go
index f11954d522c..50af9475921 100644
--- a/pkg/bmt/reference/reference_test.go
+++ b/pkg/bmt/reference/reference_test.go
@@ -106,8 +106,6 @@ func TestRefHasher(t *testing.T) {
     } {
         for segCount := x.from; segCount <= x.to; segCount++ {
             for length := 1; length <= segCount*32; length++ {
-                length, segCount, x := length, segCount, x
-
                 t.Run(fmt.Sprintf("%d_segments_%d_bytes", segCount, length), func(t *testing.T) {
                     t.Parallel()
diff --git a/pkg/cac/cac_test.go b/pkg/cac/cac_test.go
index 2f1384aa2a0..5008db922df 100644
--- a/pkg/cac/cac_test.go
+++ b/pkg/cac/cac_test.go
@@ -89,7 +89,6 @@ func TestChunkInvariantsNew(t *testing.T) {
             wantErr: nil,
         },
     } {
-        cc := cc
         t.Run(cc.name, func(t *testing.T) {
             t.Parallel()
@@ -135,7 +134,6 @@ func TestChunkInvariantsNewWithDataSpan(t *testing.T) {
             wantErr: nil,
         },
     } {
-        cc := cc
         t.Run(cc.name, func(t *testing.T) {
             t.Parallel()
@@ -219,7 +217,6 @@ func TestInvalid(t *testing.T) {
             ),
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/crypto/crypto.go b/pkg/crypto/crypto.go
index d10c14a9991..77fbeae4a56 100644
--- a/pkg/crypto/crypto.go
+++ b/pkg/crypto/crypto.go
@@ -14,6 +14,7 @@ import (
     "fmt"
 
     "github.com/btcsuite/btcd/btcec/v2"
+    "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethersphere/bee/v2/pkg/swarm"
     "golang.org/x/crypto/sha3"
 )
@@ -29,7 +30,6 @@ const (
 
 // NewOverlayAddress constructs a Swarm Address from ECDSA public key.
 func NewOverlayAddress(p ecdsa.PublicKey, networkID uint64, nonce []byte) (swarm.Address, error) {
-
     ethAddr, err := NewEthereumAddress(p)
     if err != nil {
         return swarm.ZeroAddress, err
@@ -44,7 +44,6 @@ func NewOverlayAddress(p ecdsa.PublicKey, networkID uint64, nonce []byte) (swarm
 
 // NewOverlayFromEthereumAddress constructs a Swarm Address for an Ethereum address.
 func NewOverlayFromEthereumAddress(ethAddr []byte, networkID uint64, nonce []byte) (swarm.Address, error) {
-
     netIDBytes := make([]byte, 8)
     binary.LittleEndian.PutUint64(netIDBytes, networkID)
@@ -116,7 +115,7 @@ func NewEthereumAddress(p ecdsa.PublicKey) ([]byte, error) {
     if p.X == nil || p.Y == nil {
         return nil, errors.New("invalid public key")
     }
-    pubBytes := elliptic.Marshal(btcec.S256(), p.X, p.Y)
+    pubBytes := crypto.S256().Marshal(p.X, p.Y)
     pubHash, err := LegacyKeccak256(pubBytes[1:])
     if err != nil {
         return nil, err
diff --git a/pkg/encryption/mock/mock_test.go b/pkg/encryption/mock/mock_test.go
index 97e52b7777c..3dce0055802 100644
--- a/pkg/encryption/mock/mock_test.go
+++ b/pkg/encryption/mock/mock_test.go
@@ -62,7 +62,6 @@ func TestEncryptor_Encrypt(t *testing.T) {
             wantErr: mock.ErrInvalidXORKey,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -125,7 +124,6 @@ func TestEncryptor_Decrypt(t *testing.T) {
             wantErr: mock.ErrInvalidXORKey,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/feeds/sequence/sequence.go b/pkg/feeds/sequence/sequence.go
index 5184885f1ab..f1f254309ca 100644
--- a/pkg/feeds/sequence/sequence.go
+++ b/pkg/feeds/sequence/sequence.go
@@ -195,10 +195,10 @@ func (f *asyncFinder) At(ctx context.Context, at int64, after uint64) (ch swarm.
 }
 
 // at launches concurrent lookups at exponential intervals after the starting from further
-func (f *asyncFinder) at(ctx context.Context, at int64, min int, i *interval, c chan<- *result, quit <-chan struct{}) {
+func (f *asyncFinder) at(ctx context.Context, at int64, minValue int, i *interval, c chan<- *result, quit <-chan struct{}) {
     var wg sync.WaitGroup
 
-    for l := i.level; l > min; l-- {
+    for l := i.level; l > minValue; l-- {
         select {
         case <-quit:
             // if the parent process quit
             return
diff --git a/pkg/feeds/testing/lookup.go b/pkg/feeds/testing/lookup.go
index 8fd852888c8..21656be5b64 100644
--- a/pkg/feeds/testing/lookup.go
+++ b/pkg/feeds/testing/lookup.go
@@ -116,7 +116,6 @@ func TestFinderFixIntervals(t *testing.T, nextf func() (bool, int64), finderf fu
 }
 
 func TestFinderIntervals(t *testing.T, nextf func() (bool, int64), finderf func(storage.Getter, *feeds.Feed) feeds.Lookup, updaterf func(putter storage.Putter, signer crypto.Signer, topic []byte) (feeds.Updater, error)) {
-
     storer := &Timeout{inmemchunkstore.New()}
     topicStr := "testtopic"
     topic, err := crypto.LegacyKeccak256([]byte(topicStr))
@@ -188,7 +187,6 @@ func TestFinderRandomIntervals(t *testing.T, finderf func(storage.Getter, *feeds
     t.Parallel()
 
     for j := 0; j < 3; j++ {
-        j := j
         t.Run(fmt.Sprintf("random intervals %d", j), func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/file/buffer_test.go b/pkg/file/buffer_test.go
index 3416db1084a..9fdf66266b6 100644
--- a/pkg/file/buffer_test.go
+++ b/pkg/file/buffer_test.go
@@ -34,7 +34,6 @@ func TestChunkPipe(t *testing.T) {
         {swarm.ChunkSize, swarm.ChunkSize}, // on, on
     }
     for i, tc := range dataWrites {
-        tc := tc
         t.Run(strconv.Itoa(i), func(t *testing.T) {
             t.Parallel()
@@ -130,7 +129,6 @@ func TestCopyBuffer(t *testing.T) {
     }
 
     for _, tc := range testCases {
-        tc := tc
         t.Run(fmt.Sprintf("buf_%-4d/data_size_%d", tc.readBufferSize, tc.dataSize), func(t *testing.T) {
             t.Parallel()
@@ -197,7 +195,7 @@ func reader(t *testing.T, bufferSize int, r io.Reader, c chan<- readResult) {
 
     defer close(c)
 
-    var buf = make([]byte, bufferSize)
+    buf := make([]byte, bufferSize)
     for {
         n, err := r.Read(buf)
         if errors.Is(err, io.EOF) {
diff --git a/pkg/file/joiner/joiner_test.go b/pkg/file/joiner/joiner_test.go
index 6d3ecd9f241..ce00ac8d782 100644
--- a/pkg/file/joiner/joiner_test.go
+++ b/pkg/file/joiner/joiner_test.go
@@ -230,7 +230,7 @@ func TestJoinerMalformed(t *testing.T) {
 func TestEncryptDecrypt(t *testing.T) {
     t.Parallel()
 
-    var tests = []struct {
+    tests := []struct {
         chunkLength int
     }{
         {10},
@@ -243,7 +243,6 @@ func TestEncryptDecrypt(t *testing.T) {
     }
 
     for _, tt := range tests {
-        tt := tt
         t.Run(fmt.Sprintf("Encrypt %d bytes", tt.chunkLength), func(t *testing.T) {
             t.Parallel()
@@ -333,7 +332,6 @@ func TestSeek(t *testing.T) {
             size: 2*swarm.ChunkSize*swarm.ChunkSize + 1000,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -611,7 +609,6 @@ func TestPrefetch(t *testing.T) {
             expRead: 100000,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -1074,7 +1071,6 @@ func TestJoinerRedundancy(t *testing.T) {
             true,
         },
     } {
-        tc := tc
         t.Run(fmt.Sprintf("redundancy=%d encryption=%t", tc.rLevel, tc.encryptChunk), func(t *testing.T) {
             ctx, cancel := context.WithCancel(context.Background())
             defer cancel()
@@ -1339,12 +1335,10 @@ func TestJoinerRedundancyMultilevel(t *testing.T) {
     r2level := []int{2, 1, 2, 3, 2}
     encryptChunk := []bool{false, false, true, true, true}
     for _, rLevel := range []redundancy.Level{0, 1, 2, 3, 4} {
-        rLevel := rLevel
         // speeding up tests by skipping some of them
         t.Run(fmt.Sprintf("rLevel=%v", rLevel), func(t *testing.T) {
             t.Parallel()
             for _, encrypt := range []bool{false, true} {
-                encrypt := encrypt
                 shardCnt := rLevel.GetMaxShards()
                 if encrypt {
                     shardCnt = rLevel.GetMaxEncShards()
@@ -1416,7 +1410,6 @@ func (c *chunkStore) Replace(_ context.Context, ch swarm.Chunk) error {
     defer c.mu.Unlock()
     c.chunks[ch.Address().ByteString()] = swarm.NewChunk(ch.Address(), ch.Data()).WithStamp(ch.Stamp())
     return nil
-
 }
 
 func (c *chunkStore) Has(_ context.Context, addr swarm.Address) (bool, error) {
diff --git a/pkg/file/pipeline/bmt/bmt_test.go b/pkg/file/pipeline/bmt/bmt_test.go
index 109a9cc9d42..7fba1d763d0 100644
--- a/pkg/file/pipeline/bmt/bmt_test.go
+++ b/pkg/file/pipeline/bmt/bmt_test.go
@@ -46,7 +46,6 @@ func TestBmtWriter(t *testing.T) {
             expErr: bmt.ErrInvalidData,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/file/pipeline/builder/builder_test.go b/pkg/file/pipeline/builder/builder_test.go
index 9f8070b1510..17091bcc0d7 100644
--- a/pkg/file/pipeline/builder/builder_test.go
+++ b/pkg/file/pipeline/builder/builder_test.go
@@ -87,7 +87,6 @@ func TestAllVectors(t *testing.T) {
     for i := 1; i <= 20; i++ {
         data, expect := test.GetVector(t, i)
-        i := i
         t.Run(fmt.Sprintf("data length %d, vector %d", len(data), i), func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/file/pipeline/feeder/feeder_test.go b/pkg/file/pipeline/feeder/feeder_test.go
index 51c56dd6a3b..6632b854461 100644
--- a/pkg/file/pipeline/feeder/feeder_test.go
+++ b/pkg/file/pipeline/feeder/feeder_test.go
@@ -75,7 +75,6 @@ func TestFeeder(t *testing.T) {
             span: 5,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
             var results pipeline.PipeWriteArgs
@@ -179,7 +178,6 @@ func TestFeederFlush(t *testing.T) {
             span: 3,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/file/pipeline/hashtrie/hashtrie_test.go b/pkg/file/pipeline/hashtrie/hashtrie_test.go
index 7d2820a0ae0..b36fede3556 100644
--- a/pkg/file/pipeline/hashtrie/hashtrie_test.go
+++ b/pkg/file/pipeline/hashtrie/hashtrie_test.go
@@ -84,9 +84,7 @@ func newErasureHashTrieWriter(
 func TestLevels(t *testing.T) {
     t.Parallel()
 
-    var (
-        hashSize = 32
-    )
+    hashSize := 32
 
     // to create a level wrap we need to do branching^(level-1) writes
     for _, tc := range []struct {
@@ -134,8 +132,6 @@ func TestLevels(t *testing.T) {
             writes: 16384,
         },
     } {
-
-        tc := tc
         t.Run(tc.desc, func(t *testing.T) {
             t.Parallel()
@@ -165,7 +161,7 @@ func TestLevels(t *testing.T) {
                 t.Fatal(err)
             }
 
-            //check the span. since write spans are 1 value 1, then expected span == tc.writes
+            // check the span. since write spans are 1 value 1, then expected span == tc.writes
             sp := binary.LittleEndian.Uint64(rootch.Data()[:swarm.SpanSize])
             if sp != uint64(tc.writes) {
                 t.Fatalf("want span %d got %d", tc.writes, sp)
@@ -312,7 +308,6 @@ func TestRedundancy(t *testing.T) {
             parities: 116, // // 87 (full ch) + 29 (2 ref)
         },
     } {
-        tc := tc
         t.Run(tc.desc, func(t *testing.T) {
             t.Parallel()
             subCtx := redundancy.SetLevelInContext(ctx, tc.level)
diff --git a/pkg/file/redundancy/getter/getter_test.go b/pkg/file/redundancy/getter/getter_test.go
index 6b22230a60d..42566866a3e 100644
--- a/pkg/file/redundancy/getter/getter_test.go
+++ b/pkg/file/redundancy/getter/getter_test.go
@@ -310,8 +310,6 @@ func checkShardsAvailable(t *testing.T, s storage.ChunkStore, addrs []swarm.Addr
     t.Helper()
     eg, ctx := errgroup.WithContext(context.Background())
     for i, addr := range addrs {
-        i := i
-        addr := addr
         eg.Go(func() (err error) {
             var delay time.Duration
             var ch swarm.Chunk
diff --git a/pkg/hive/hive.go b/pkg/hive/hive.go
index a78b863af5b..27858cdcf87 100644
--- a/pkg/hive/hive.go
+++ b/pkg/hive/hive.go
@@ -110,17 +110,17 @@ func (s *Service) Protocol() p2p.ProtocolSpec {
 var ErrShutdownInProgress = errors.New("shutdown in progress")
 
 func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error {
-    max := maxBatchSize
+    maxSize := maxBatchSize
     s.metrics.BroadcastPeers.Inc()
     s.metrics.BroadcastPeersPeers.Add(float64(len(peers)))
 
     for len(peers) > 0 {
-        if max > len(peers) {
-            max = len(peers)
+        if maxSize > len(peers) {
+            maxSize = len(peers)
         }
 
         // If broadcasting limit is exceeded, return early
-        if !s.outLimiter.Allow(addressee.ByteString(), max) {
+        if !s.outLimiter.Allow(addressee.ByteString(), maxSize) {
             return nil
         }
@@ -130,11 +130,11 @@ func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, p
         default:
         }
 
-        if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil {
+        if err := s.sendPeers(ctx, addressee, peers[:maxSize]); err != nil {
             return err
         }
 
-        peers = peers[max:]
+        peers = peers[maxSize:]
     }
 
     return nil
@@ -277,13 +277,11 @@ func (s *Service) startCheckPeersHandler() {
 }
 
 func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
-
     var peersToAdd []swarm.Address
     mtx := sync.Mutex{}
     wg := sync.WaitGroup{}
 
     addPeer := func(newPeer *pb.BzzAddress, multiUnderlay ma.Multiaddr) {
-
         err := s.sem.Acquire(ctx, 1)
         if err != nil {
             return
@@ -332,7 +330,6 @@ func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
             peersToAdd = append(peersToAdd, bzzAddress.Overlay)
             mtx.Unlock()
         }()
-
     }
 
     for _, p := range peers.Peers {
diff --git a/pkg/hive/hive_test.go b/pkg/hive/hive_test.go
index a908c68b899..fdb903cf7e5 100644
--- a/pkg/hive/hive_test.go
+++ b/pkg/hive/hive_test.go
@@ -241,7 +241,6 @@ func TestBroadcastPeers(t *testing.T) {
     }
 
     for name, tc := range testCases {
-        tc := tc
         t.Run(name, func(t *testing.T) {
             t.Parallel()
@@ -366,7 +365,6 @@ func readAndAssertPeersMsgs(in []byte, expectedLen int) ([]pb.Peers, error) {
             return new(pb.Peers)
         },
     )
-
     if err != nil {
         return nil, err
     }
diff --git a/pkg/jsonhttp/handlers_test.go b/pkg/jsonhttp/handlers_test.go
index 7ede319580c..c40e4865949 100644
--- a/pkg/jsonhttp/handlers_test.go
+++ b/pkg/jsonhttp/handlers_test.go
@@ -173,7 +173,6 @@ func TestNewMaxBodyBytesHandler(t *testing.T) {
             wantCode: http.StatusRequestEntityTooLarge,
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/jsonhttp/jsonhttp_test.go b/pkg/jsonhttp/jsonhttp_test.go
index cd6a378d5c5..61a17684e5e 100644
--- a/pkg/jsonhttp/jsonhttp_test.go
+++ b/pkg/jsonhttp/jsonhttp_test.go
@@ -174,7 +174,6 @@ func TestRespond_special(t *testing.T) {
             wantMessage: "2.4.8.16",
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/keystore/file/key.go b/pkg/keystore/file/key.go
index 68e6a2e2edf..dcbe22612f5 100644
--- a/pkg/keystore/file/key.go
+++ b/pkg/keystore/file/key.go
@@ -83,7 +83,11 @@ func encryptKey(k *ecdsa.PrivateKey, password string, edg keystore.EDG) ([]byte,
         }
         addr = a
     case elliptic.P256():
-        addr = elliptic.Marshal(elliptic.P256(), k.PublicKey.X, k.PublicKey.Y)
+        privKey, err := k.ECDH()
+        if err != nil {
+            return nil, fmt.Errorf("generate key: %w", err)
+        }
+        addr = privKey.PublicKey().Bytes()
     default:
         return nil, fmt.Errorf("unsupported curve: %v", k.PublicKey.Curve)
     }
diff --git a/pkg/keystore/file/service_test.go b/pkg/keystore/file/service_test.go
index 4c115c7f79e..f5f90c3231e 100644
--- a/pkg/keystore/file/service_test.go
+++ b/pkg/keystore/file/service_test.go
@@ -5,8 +5,13 @@
 package file_test
 
 import (
+    "bytes"
+    "crypto/elliptic"
     "testing"
 
+    "github.com/btcsuite/btcd/btcec/v2"
+    ethcrypto "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethersphere/bee/v2/pkg/crypto"
     "github.com/ethersphere/bee/v2/pkg/keystore/file"
     "github.com/ethersphere/bee/v2/pkg/keystore/test"
 )
@@ -14,7 +19,58 @@ import (
 func TestService(t *testing.T) {
     t.Parallel()
 
-    dir := t.TempDir()
+    t.Run("EDGSecp256_K1", func(t *testing.T) {
+        test.Service(t, file.New(t.TempDir()), crypto.EDGSecp256_K1)
+    })
 
-    test.Service(t, file.New(dir))
+    t.Run("EDGSecp256_R1", func(t *testing.T) {
+        test.Service(t, file.New(t.TempDir()), crypto.EDGSecp256_R1)
+    })
+}
+
+func TestDeprecatedEllipticMarshal(t *testing.T) {
+    t.Parallel()
+
+    t.Run("EDGSecp256_K1", func(t *testing.T) {
+        pk, err := crypto.EDGSecp256_K1.Generate()
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        pubBytes := ethcrypto.S256().Marshal(pk.X, pk.Y)
+        if len(pubBytes) != 65 {
+            t.Fatalf("public key bytes length mismatch")
+        }
+
+        // nolint:staticcheck
+        pubBytesDeprecated := elliptic.Marshal(btcec.S256(), pk.X, pk.Y)
+
+        if !bytes.Equal(pubBytes, pubBytesDeprecated) {
+            t.Fatalf("public key bytes mismatch")
+        }
+    })
+
+    t.Run("EDGSecp256_R1", func(t *testing.T) {
+        pk, err := crypto.EDGSecp256_R1.Generate()
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        pkECDH, err := pk.ECDH()
+        if err != nil {
+            t.Fatalf("ecdh failed: %v", err)
+        }
+
+        pubBytes := pkECDH.PublicKey().Bytes()
+        if len(pubBytes) != 65 {
+            t.Fatalf("public key bytes length mismatch")
+        }
+
+        // nolint:staticcheck
+        pubBytesDeprecated := elliptic.Marshal(elliptic.P256(), pk.X, pk.Y)
+
+        if !bytes.Equal(pubBytes, pubBytesDeprecated) {
+            t.Fatalf("public key bytes mismatch")
+        }
+    })
 }
diff --git a/pkg/keystore/mem/service_test.go b/pkg/keystore/mem/service_test.go
index ae3d4640cc0..37a944a059d 100644
--- a/pkg/keystore/mem/service_test.go
+++ b/pkg/keystore/mem/service_test.go
@@ -7,6 +7,7 @@ package mem_test
 import (
     "testing"
 
+    "github.com/ethersphere/bee/v2/pkg/crypto"
     "github.com/ethersphere/bee/v2/pkg/keystore/mem"
     "github.com/ethersphere/bee/v2/pkg/keystore/test"
 )
@@ -14,5 +15,11 @@ import (
 func TestService(t *testing.T) {
     t.Parallel()
 
-    test.Service(t, mem.New())
+    t.Run("EDGSecp256_K1", func(t *testing.T) {
+        test.Service(t, mem.New(), crypto.EDGSecp256_K1)
+    })
+
+    t.Run("EDGSecp256_R1", func(t *testing.T) {
+        test.Service(t, mem.New(), crypto.EDGSecp256_R1)
+    })
 }
diff --git a/pkg/keystore/test/test.go b/pkg/keystore/test/test.go
index 5eb4a4c1b26..47309a6afe0 100644
--- a/pkg/keystore/test/test.go
+++ b/pkg/keystore/test/test.go
@@ -9,13 +9,12 @@ import (
     "errors"
     "testing"
 
-    "github.com/ethersphere/bee/v2/pkg/crypto"
     "github.com/ethersphere/bee/v2/pkg/keystore"
 )
 
 // Service is a utility testing function that can be used to test
 // implementations of the keystore.Service interface.
-func Service(t *testing.T, s keystore.Service) {
+func Service(t *testing.T, s keystore.Service, edg keystore.EDG) {
     t.Helper()
 
     exists, err := s.Exists("swarm")
@@ -27,7 +26,6 @@ func Service(t *testing.T, s keystore.Service) {
         t.Fatal("should not exist")
     }
 
-    edg := crypto.EDGSecp256_K1
     // create a new swarm key
     k1, created, err := s.Key("swarm", "pass123456", edg)
     if err != nil {
diff --git a/pkg/log/registry.go b/pkg/log/registry.go
index d1b71a46f67..a467cfc39cf 100644
--- a/pkg/log/registry.go
+++ b/pkg/log/registry.go
@@ -96,10 +96,10 @@ func NewLogger(name string, opts ...Option) Logger {
 // of verbosity of the given logger.
 func SetVerbosity(l Logger, v Level) error {
     bl := l.(*logger)
-    switch newLvl, max := v.get(), Level(bl.v); {
+    switch newLvl, maxValue := v.get(), Level(bl.v); {
     case newLvl == VerbosityAll:
-        bl.setVerbosity(max)
-    case newLvl > max:
+        bl.setVerbosity(maxValue)
+    case newLvl > maxValue:
         return fmt.Errorf("maximum verbosity %d exceeded for logger: %s", bl.v, bl.id)
     default:
         bl.setVerbosity(newLvl)
diff --git a/pkg/manifest/mantaray/marshal_test.go b/pkg/manifest/mantaray/marshal_test.go
index 48e1ef32420..e0d728602fa 100644
--- a/pkg/manifest/mantaray/marshal_test.go
+++ b/pkg/manifest/mantaray/marshal_test.go
@@ -7,7 +7,6 @@ package mantaray
 import (
     "bytes"
     "context"
-    "encoding/hex"
     "errors"
     "reflect"
@@ -271,7 +270,6 @@ func Test_UnmarshalBinary(t *testing.T) {
     }
 
     for _, tc := range tests {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/manifest/mantaray/node_test.go b/pkg/manifest/mantaray/node_test.go
index aa2e2f5c355..b9a8da787d7 100644
--- a/pkg/manifest/mantaray/node_test.go
+++ b/pkg/manifest/mantaray/node_test.go
@@ -136,7 +136,6 @@ func TestAddAndLookupNode(t *testing.T) {
         },
     } {
         ctx := context.Background()
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -264,7 +263,6 @@ func TestRemove(t *testing.T) {
         },
     } {
         ctx := context.Background()
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -305,7 +303,6 @@ func TestRemove(t *testing.T) {
                     t.Fatalf("expected not found error, got %v", err)
                 }
             }
-
         })
     }
 }
@@ -354,7 +351,6 @@ func TestHasPrefix(t *testing.T) {
         },
     } {
         ctx := context.Background()
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -382,7 +378,6 @@ func TestHasPrefix(t *testing.T) {
                 t.Errorf("expected prefix path %s to be %t, was %t", testPrefix, shouldExist, exists)
             }
         }
-
         })
     }
 }
diff --git a/pkg/manifest/mantaray/persist.go b/pkg/manifest/mantaray/persist.go
index 2bd07d74c94..9c08769a1d0 100644
--- a/pkg/manifest/mantaray/persist.go
+++ b/pkg/manifest/mantaray/persist.go
@@ -71,7 +71,6 @@ func (n *Node) save(ctx context.Context, s Saver) error {
     }
     eg, ectx := errgroup.WithContext(ctx)
     for _, f := range n.forks {
-        f := f
         eg.Go(func() error {
             return f.Node.save(ectx, s)
         })
diff --git a/pkg/manifest/mantaray/walker_test.go b/pkg/manifest/mantaray/walker_test.go
index e49d72364aa..f6d72687a39 100644
--- a/pkg/manifest/mantaray/walker_test.go
+++ b/pkg/manifest/mantaray/walker_test.go
@@ -50,8 +50,6 @@ func TestWalkNode(t *testing.T) {
         },
     } {
         ctx := context.Background()
-        tc := tc
-
         createTree := func(t *testing.T, toAdd [][]byte) *mantaray.Node {
             t.Helper()
@@ -87,7 +85,6 @@ func TestWalkNode(t *testing.T) {
             walkedCount := 0
 
             walker := func(path []byte, node *mantaray.Node, err error) error {
-
                 if !pathExistsInRightSequence(path, tc.expected, walkedCount) {
                     return fmt.Errorf("walkFn returned unexpected path: %s", path)
                 }
@@ -123,7 +120,6 @@ func TestWalkNode(t *testing.T) {
             walkedCount := 0
 
             walker := func(path []byte, node *mantaray.Node, err error) error {
-
                 if !pathExistsInRightSequence(path, tc.expected, walkedCount) {
                     return fmt.Errorf("walkFn returned unexpected path: %s", path)
                 }
diff --git a/pkg/manifest/simple/manifest_test.go b/pkg/manifest/simple/manifest_test.go
index 4b7f160b603..e34616107ed 100644
--- a/pkg/manifest/simple/manifest_test.go
+++ b/pkg/manifest/simple/manifest_test.go
@@ -100,7 +100,6 @@ func TestEntries(t *testing.T) {
     t.Parallel()
 
     for _, tc := range makeTestCases(t) {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -164,7 +163,6 @@ func TestEntries(t *testing.T) {
                 checkLength(t, m, manifestLen-i-1)
             }
-
         })
     }
 }
@@ -198,7 +196,6 @@ func TestMarshal(t *testing.T) {
     t.Parallel()
 
     for _, tc := range makeTestCases(t) {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -271,7 +268,6 @@ func TestHasPrefix(t *testing.T) {
             },
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -294,7 +290,6 @@ func TestHasPrefix(t *testing.T) {
                 t.Errorf("expected prefix path %s to be %t, was %t", testPrefix, shouldExist, exists)
             }
         }
-
         })
     }
 }
diff --git a/pkg/manifest/simple/walker_test.go b/pkg/manifest/simple/walker_test.go
index c3877ce664e..a0159465d44 100644
--- a/pkg/manifest/simple/walker_test.go
+++ b/pkg/manifest/simple/walker_test.go
@@ -15,7 +15,6 @@ func TestWalkEntry(t *testing.T) {
     t.Parallel()
 
     for _, tc := range makeTestCases(t) {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/p2p/libp2p/internal/breaker/breaker_test.go b/pkg/p2p/libp2p/internal/breaker/breaker_test.go
index 2693037abbf..35e33222df4 100644
--- a/pkg/p2p/libp2p/internal/breaker/breaker_test.go
+++ b/pkg/p2p/libp2p/internal/breaker/breaker_test.go
@@ -73,7 +73,6 @@ func TestExecute(t *testing.T) {
     }
 
     for name, tc := range testCases {
-        tc := tc
         t.Run(name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/p2p/libp2p/internal/reacher/reacher_test.go b/pkg/p2p/libp2p/internal/reacher/reacher_test.go
index a9cfa005ed5..73ba1842b2d 100644
--- a/pkg/p2p/libp2p/internal/reacher/reacher_test.go
+++ b/pkg/p2p/libp2p/internal/reacher/reacher_test.go
@@ -61,7 +61,6 @@ func TestPingSuccess(t *testing.T) {
             },
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/p2p/libp2p/libp2p.go b/pkg/p2p/libp2p/libp2p.go
index da620430fb6..9055cdd8fc8 100644
--- a/pkg/p2p/libp2p/libp2p.go
+++ b/pkg/p2p/libp2p/libp2p.go
@@ -556,7 +556,6 @@ func (s *Service) SetPickyNotifier(n p2p.PickyNotifier) {
 
 func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
     for _, ss := range p.StreamSpecs {
-        ss := ss
         id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
         matcher, err := s.protocolSemverMatcher(id)
         if err != nil {
diff --git a/pkg/p2p/libp2p/static_resolver_test.go b/pkg/p2p/libp2p/static_resolver_test.go
index 4aff2ca41c6..c45cce0142f 100644
--- a/pkg/p2p/libp2p/static_resolver_test.go
+++ b/pkg/p2p/libp2p/static_resolver_test.go
@@ -82,7 +82,6 @@ func TestStaticAddressResolver(t *testing.T) {
             want: "/dns/ipv4and6.com/tcp/30777/p2p/16Uiu2HAkyyGKpjBiCkVqCKoJa6RzzZw9Nr7hGogsMPcdad1KyMmd",
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/p2p/protobuf/protobuf_test.go b/pkg/p2p/protobuf/protobuf_test.go
index 5dd675b0687..3a1edf30bf5 100644
--- a/pkg/p2p/protobuf/protobuf_test.go
+++ b/pkg/p2p/protobuf/protobuf_test.go
@@ -44,7 +44,6 @@ func TestReader_ReadMsg(t *testing.T) {
         },
     },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -98,7 +97,6 @@ func TestReader_timeout(t *testing.T) {
         },
     },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -164,7 +162,6 @@ func TestWriter(t *testing.T) {
         },
     },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -210,7 +207,6 @@ func TestWriter_timeout(t *testing.T) {
         },
     },
     } {
-        tc := tc
         t.Run(tc.name+"WithContext", func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/postage/stampissuer_test.go b/pkg/postage/stampissuer_test.go
index 3a72b57f3d6..460e1f20458 100644
--- a/pkg/postage/stampissuer_test.go
+++ b/pkg/postage/stampissuer_test.go
@@ -145,8 +145,6 @@ func TestStampItem(t *testing.T) {
     }}
 
     for _, tc := range tests {
-        tc := tc
-
         t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
             t.Parallel()
@@ -249,7 +247,6 @@ func TestUtilization(t *testing.T) {
 
         t.Logf("depth: %d, actual utilization: %f", depth, float64(count)/math.Pow(2, float64(depth)))
     }
-
 }
 
 func bytesToIndex(buf []byte) (bucket, index uint32) {
diff --git a/pkg/puller/puller_test.go b/pkg/puller/puller_test.go
index 80ae02243f0..29687138b9c 100644
--- a/pkg/puller/puller_test.go
+++ b/pkg/puller/puller_test.go
@@ -35,7 +35,8 @@ func TestOneSync(t *testing.T) {
         cursors = []uint64{1000, 1000, 1000}
         replies = []mockps.SyncReply{
             {Bin: 1, Start: 1, Topmost: 1000, Peer: addr},
-            {Bin: 2, Start: 1, Topmost: 1001, Peer: addr}}
+            {Bin: 2, Start: 1, Topmost: 1001, Peer: addr},
+        }
     )
 
     _, _, kad, pullsync := newPuller(t, opts{
@@ -66,7 +67,8 @@ func TestSyncOutsideDepth(t *testing.T) {
         replies = []mockps.SyncReply{
             {Bin: 0, Start: 1, Topmost: 1000, Peer: addr2},
             {Bin: 2, Start: 1, Topmost: 1000, Peer: addr},
-            {Bin: 3, Start: 1, Topmost: 1000, Peer: addr}}
+            {Bin: 3, Start: 1, Topmost: 1000, Peer: addr},
+        }
     )
 
     _, _, kad, pullsync := newPuller(t, opts{
@@ -177,7 +179,6 @@ func TestSyncIntervals(t *testing.T) {
         },
     } {
-        tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
@@ -474,9 +475,7 @@ func TestRadiusIncrease(t *testing.T) {
 func TestContinueSyncing(t *testing.T) {
     t.Parallel()
 
-    var (
-        addr = swarm.RandAddress(t)
-    )
+    addr := swarm.RandAddress(t)
 
     _, _, kad, pullsync := newPuller(t, opts{
         kad: []kadMock.Option{
@@ -515,7 +514,8 @@ func TestPeerGone(t *testing.T) {
         addr    = swarm.RandAddress(t)
         replies = []mockps.SyncReply{
             {Bin: 0, Start: 1, Topmost: 1001, Peer: addr},
-            {Bin: 1, Start: 1, Topmost: 1001, Peer: addr}}
+            {Bin: 1, Start: 1, Topmost: 1001, Peer: addr},
+        }
     )
 
     p, _, kad, pullsync := newPuller(t, opts{
diff --git a/pkg/replicas/getter_test.go b/pkg/replicas/getter_test.go
index f300637c891..d1d727dd5fd 100644
--- a/pkg/replicas/getter_test.go
+++ b/pkg/replicas/getter_test.go
@@ -181,7 +181,6 @@ func TestGetter(t *testing.T) {
             }
             // if j <= c, the original chunk should be retrieved and the context should be cancelled
             t.Run("retrievals cancelled", func(t *testing.T) {
-
                 select {
                 case <-time.After(100 * time.Millisecond):
                     t.Fatal("timed out waiting for context to be cancelled")
@@ -233,17 +232,16 @@ func TestGetter(t *testing.T) {
                     }
                     return
                 }
-                max := 2
-                for i := 1; i < tc.level && max < tc.found; i++ {
-                    max = max * 2
+                maxValue := 2
+                for i := 1; i < tc.level && maxValue < tc.found; i++ {
+                    maxValue = maxValue * 2
                 }
-                if attempts > max {
-                    t.Fatalf("too many attempts to retrieve a replica: want at most %v. got %v. latencies %v", max, attempts, latencies)
+                if attempts > maxValue {
+                    t.Fatalf("too many attempts to retrieve a replica: want at most %v. got %v. latencies %v", maxValue, attempts, latencies)
                 }
             })
 
             t.Run("dispersion", func(t *testing.T) {
-
                 if err := dispersed(redundancy.Level(tc.level), ch, addresses); err != nil {
                     t.Fatalf("addresses are not dispersed: %v", err)
                 }
diff --git a/pkg/replicas/putter.go b/pkg/replicas/putter.go
index 3404bca5f03..017faae5b9e 100644
--- a/pkg/replicas/putter.go
+++ b/pkg/replicas/putter.go
@@ -40,7 +40,6 @@ func (p *putter) Put(ctx context.Context, ch swarm.Chunk) (err error) {
     errc := make(chan error, rlevel.GetReplicaCount())
     wg := sync.WaitGroup{}
     for r := range rr.c {
-        r := r
         wg.Add(1)
         go func() {
             defer wg.Done()
diff --git a/pkg/resolver/client/ens/ens_test.go b/pkg/resolver/client/ens/ens_test.go
index aa0d0c0cb64..77253d786ba 100644
--- a/pkg/resolver/client/ens/ens_test.go
+++ b/pkg/resolver/client/ens/ens_test.go
@@ -53,7 +53,6 @@ func TestNewENSClient(t *testing.T) {
         },
     }
     for _, tC := range testCases {
-        tC := tC
         t.Run(tC.desc, func(t *testing.T) {
             t.Parallel()
@@ -186,7 +185,6 @@ func TestResolve(t *testing.T) {
         },
     }
     for _, tC := range testCases {
-        tC := tC
         t.Run(tC.desc, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/resolver/multiresolver/config_test.go b/pkg/resolver/multiresolver/config_test.go
index 9b4c2d502c2..a1195532ec9 100644
--- a/pkg/resolver/multiresolver/config_test.go
+++ b/pkg/resolver/multiresolver/config_test.go
@@ -118,7 +118,6 @@ func TestParseConnectionStrings(t *testing.T) {
         },
     }
     for _, tC := range testCases {
-        tC := tC
         t.Run(tC.desc, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/resolver/multiresolver/multiresolver_test.go b/pkg/resolver/multiresolver/multiresolver_test.go
index 326cbb69a04..c1f9ba0311a 100644
--- a/pkg/resolver/multiresolver/multiresolver_test.go
+++ b/pkg/resolver/multiresolver/multiresolver_test.go
@@ -76,7 +76,6 @@ func TestPushResolver(t *testing.T) {
     }
 
     for _, tC := range testCases {
-        tC := tC
         t.Run(tC.desc, func(t *testing.T) {
             t.Parallel()
@@ -241,7 +240,6 @@ func TestResolve(t *testing.T) {
     }
 
     for _, tC := range testCases {
-        tC := tC
         t.Run(tC.name, func(t *testing.T) {
             t.Parallel()
diff --git a/pkg/settlement/swap/chequebook/export_test.go b/pkg/settlement/swap/chequebook/export_test.go
index 008586bc0a8..5a274840522 100644
--- a/pkg/settlement/swap/chequebook/export_test.go
+++ b/pkg/settlement/swap/chequebook/export_test.go
@@ -1,3 +1,6 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 package chequebook
 
 var (
diff --git a/pkg/settlement/swap/export_test.go b/pkg/settlement/swap/export_test.go
index fbea8817753..da949633b15 100644
--- a/pkg/settlement/swap/export_test.go
+++ b/pkg/settlement/swap/export_test.go
@@ -1,3 +1,6 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package swap var ( diff --git a/pkg/sharky/shard_test.go b/pkg/sharky/shard_test.go index 567a0cf9ad3..f816f69c698 100644 --- a/pkg/sharky/shard_test.go +++ b/pkg/sharky/shard_test.go @@ -31,7 +31,6 @@ func TestLocationSerialization(t *testing.T) { Length: math.MaxUint16, }, } { - tc := tc t.Run(fmt.Sprintf("%d_%d_%d", tc.Shard, tc.Slot, tc.Length), func(t *testing.T) { t.Parallel() diff --git a/pkg/sharky/sharky_test.go b/pkg/sharky/sharky_test.go index 1e3aadd78b4..268a996e7c0 100644 --- a/pkg/sharky/sharky_test.go +++ b/pkg/sharky/sharky_test.go @@ -27,7 +27,7 @@ type dirFS struct { } func (d *dirFS) Open(path string) (fs.File, error) { - return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) + return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0o644) } func TestSingleRetrieval(t *testing.T) { @@ -73,7 +73,6 @@ func TestSingleRetrieval(t *testing.T) { nil, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { cctx, cancel := context.WithTimeout(ctx, 800*time.Millisecond) defer cancel() @@ -83,7 +82,6 @@ func TestSingleRetrieval(t *testing.T) { } if err != nil { return - } buf := make([]byte, datasize) err = s.Read(ctx, loc, buf) @@ -188,7 +186,6 @@ func TestConcurrency(t *testing.T) { eg, ectx := errgroup.WithContext(ctx) // a number of workers write sequential numbers to sharky for k := 0; k < workers; k++ { - k := k eg.Go(func() error { <-start buf := make([]byte, 4) @@ -287,7 +284,6 @@ func TestConcurrency(t *testing.T) { {32, 8, 32}, {64, 32, 64}, } { - c := c t.Run(fmt.Sprintf("workers:%d,shards:%d,size:%d", c.workers, c.shards, c.shardSize), func(t *testing.T) { t.Parallel() test(t, c.workers, c.shards, c.shardSize) diff --git a/pkg/soc/validator_test.go b/pkg/soc/validator_test.go index e6913000149..203e0b1bc84 100644 --- a/pkg/soc/validator_test.go +++ b/pkg/soc/validator_test.go @@ -173,7 +173,6 @@ func TestInvalid(t *testing.T) { }, }, } { - c := c t.Run(c.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/storage/migration/index_test.go b/pkg/storage/migration/index_test.go index d32b2ac4caf..ec35859d01f 100644 --- a/pkg/storage/migration/index_test.go +++ b/pkg/storage/migration/index_test.go @@ -184,7 +184,6 @@ func TestStepIndex_BatchSize(t *testing.T) { const populateItemsCount = 128 for i := 1; i <= 2*populateItemsCount; i <<= 1 { - i := i t.Run(fmt.Sprintf("callback called once per item with batch size: %d", i), func(t *testing.T) { t.Parallel() @@ -362,5 +361,4 @@ func assertItemsInRange(t *testing.T, s storage.Store, from, to int) { if err != nil { t.Fatalf("populate store should succeed: %v", err) } - } diff --git a/pkg/storage/migration/migration_test.go b/pkg/storage/migration/migration_test.go index 4201ba9ef9e..20197955c7a 100644 --- a/pkg/storage/migration/migration_test.go +++ b/pkg/storage/migration/migration_test.go @@ -19,9 +19,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/storage/storageutil" ) -var ( - errStep = errors.New("step error") -) +var errStep = errors.New("step error") func TestLatestVersion(t *testing.T) { t.Parallel() @@ -159,7 +157,6 @@ func TestValidateVersions(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() if err := migration.ValidateVersions(tt.input); (err != nil) != tt.wantErr { @@ -329,10 +326,10 @@ func TestTagIDAddressItem_MarshalAndUnmarshal(t *testing.T) { Item: &migration.StorageVersionItem{Version: rand.Uint64()}, Factory: func() storage.Item { return new(migration.StorageVersionItem) }, }, - }} + }, + } for _, tc 
 	for _, tc := range tests {
-		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()

 			storagetest.TestItemMarshalAndUnmarshal(t, tc.test)
diff --git a/pkg/storage/storagetest/benchmark.go b/pkg/storage/storagetest/benchmark.go
index dd762bb2579..ea01d58121c 100644
--- a/pkg/storage/storagetest/benchmark.go
+++ b/pkg/storage/storagetest/benchmark.go
@@ -65,8 +65,8 @@ func (g *randomValueGenerator) Value(i int) []byte {

 func makeRandomValueGenerator(r *rand.Rand, ratio float64, valueSize int) randomValueGenerator {
 	b := compressibleBytes(r, ratio, valueSize)
-	max := maxInt(valueSize, 1024*1024)
-	for len(b) < max {
+	maxVal := maxInt(valueSize, 1024*1024)
+	for len(b) < maxVal {
 		b = append(b, compressibleBytes(r, ratio, valueSize)...)
 	}
 	return randomValueGenerator{b: b, k: valueSize}
@@ -352,8 +352,8 @@ type batchDBWriter struct {
 	count int
 }

-func (w *batchDBWriter) commit(max int) {
-	if w.count >= max {
+func (w *batchDBWriter) commit(maxValue int) {
+	if w.count >= maxValue {
 		_ = w.batch.Commit()
 		w.count = 0
 		w.batch = w.db.Batch(context.Background())
diff --git a/pkg/storageincentives/agent_test.go b/pkg/storageincentives/agent_test.go
index 0ae0eda22f9..fc25d0279f3 100644
--- a/pkg/storageincentives/agent_test.go
+++ b/pkg/storageincentives/agent_test.go
@@ -42,58 +42,58 @@ func TestAgent(t *testing.T) {
 		expectedCalls bool
 		balance *big.Int
 		doubling uint8
-	}{{
-		name: "3 blocks per phase, same block number returns twice",
-		blocksPerRound: 9,
-		blocksPerPhase: 3,
-		incrementBy: 1,
-		expectedCalls: true,
-		limit: 108, // computed with blocksPerRound * (exptectedCalls + 2)
-		balance: bigBalance,
-		doubling: 1,
-	}, {
-		name: "3 blocks per phase, block number returns every block",
-		blocksPerRound: 9,
-		blocksPerPhase: 3,
-		incrementBy: 1,
-		expectedCalls: true,
-		limit: 108,
-		balance: bigBalance,
-		doubling: 0,
-	}, {
-		name: "no expected calls - block number returns late after each phase",
-		blocksPerRound: 9,
-		blocksPerPhase: 3,
-		incrementBy: 6,
-		expectedCalls: false,
-		limit: 108,
-		balance: bigBalance,
-		doubling: 0,
-	}, {
-		name: "4 blocks per phase, block number returns every other block",
-		blocksPerRound: 12,
-		blocksPerPhase: 4,
-		incrementBy: 2,
-		expectedCalls: true,
-		limit: 144,
-		balance: bigBalance,
-		doubling: 1,
-	}, {
-		// This test case is based on previous, but this time agent will not have enough
-		// balance to participate in the game so no calls are going to be made.
- name: "no expected calls - insufficient balance", - blocksPerRound: 12, - blocksPerPhase: 4, - incrementBy: 2, - expectedCalls: false, - limit: 144, - balance: big.NewInt(0), - doubling: 1, - }, + }{ + { + name: "3 blocks per phase, same block number returns twice", + blocksPerRound: 9, + blocksPerPhase: 3, + incrementBy: 1, + expectedCalls: true, + limit: 108, // computed with blocksPerRound * (exptectedCalls + 2) + balance: bigBalance, + doubling: 1, + }, { + name: "3 blocks per phase, block number returns every block", + blocksPerRound: 9, + blocksPerPhase: 3, + incrementBy: 1, + expectedCalls: true, + limit: 108, + balance: bigBalance, + doubling: 0, + }, { + name: "no expected calls - block number returns late after each phase", + blocksPerRound: 9, + blocksPerPhase: 3, + incrementBy: 6, + expectedCalls: false, + limit: 108, + balance: bigBalance, + doubling: 0, + }, { + name: "4 blocks per phase, block number returns every other block", + blocksPerRound: 12, + blocksPerPhase: 4, + incrementBy: 2, + expectedCalls: true, + limit: 144, + balance: bigBalance, + doubling: 1, + }, { + // This test case is based on previous, but this time agent will not have enough + // balance to participate in the game so no calls are going to be made. + name: "no expected calls - insufficient balance", + blocksPerRound: 12, + blocksPerPhase: 4, + incrementBy: 2, + expectedCalls: false, + limit: 144, + balance: big.NewInt(0), + doubling: 1, + }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/storageincentives/soc_mine_test.go b/pkg/storageincentives/soc_mine_test.go index aeec8738676..29a8b5e0898 100644 --- a/pkg/storageincentives/soc_mine_test.go +++ b/pkg/storageincentives/soc_mine_test.go @@ -125,7 +125,6 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd count := 8 // number of parallel workers wg := sync.WaitGroup{} for i := 0; i < count; i++ { - i := i wg.Add(1) eg.Go(func() (err error) { offset := i * 4 diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go index 2bd951e96de..79536960d94 100644 --- a/pkg/storer/internal/cache/cache_test.go +++ b/pkg/storer/internal/cache/cache_test.go @@ -69,8 +69,6 @@ func TestCacheEntryItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() diff --git a/pkg/storer/internal/chunkstamp/chunkstamp_test.go b/pkg/storer/internal/chunkstamp/chunkstamp_test.go index 49a1ef7d5ff..1167a56f10a 100644 --- a/pkg/storer/internal/chunkstamp/chunkstamp_test.go +++ b/pkg/storer/internal/chunkstamp/chunkstamp_test.go @@ -116,8 +116,6 @@ func TestChunkStampItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -225,7 +223,6 @@ func TestStoreLoadDelete(t *testing.T) { }) t.Run("delete all stored stamp index", func(t *testing.T) { - if err := ts.Run(context.Background(), func(s transaction.Store) error { return chunkstamp.Store(s.IndexStore(), ns, chunk) }); err != nil { diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go index d426adbe1e6..9e30c1af876 100644 --- a/pkg/storer/internal/chunkstore/chunkstore_test.go +++ b/pkg/storer/internal/chunkstore/chunkstore_test.go @@ -84,8 +84,6 @@ func TestRetrievalIndexItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s 
marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() @@ -108,7 +106,7 @@ type memFS struct { } func (m *memFS) Open(path string) (fs.File, error) { - return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) + return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644) } func TestChunkStore(t *testing.T) { diff --git a/pkg/storer/internal/pinning/pinning_test.go b/pkg/storer/internal/pinning/pinning_test.go index ace7929997f..21b2b5b5600 100644 --- a/pkg/storer/internal/pinning/pinning_test.go +++ b/pkg/storer/internal/pinning/pinning_test.go @@ -34,7 +34,6 @@ func newTestStorage(t *testing.T) transaction.Storage { } func TestPinStore(t *testing.T) { - tests := make([]pinningCollection, 0, 3) for _, tc := range []struct { @@ -69,7 +68,6 @@ func TestPinStore(t *testing.T) { t.Run("create new collections", func(t *testing.T) { for tCount, tc := range tests { t.Run(fmt.Sprintf("create collection %d", tCount), func(t *testing.T) { - var putter internal.PutterCloserWithReference var err error err = st.Run(context.Background(), func(s transaction.Store) error { @@ -519,8 +517,6 @@ func TestPinCollectionItem(t *testing.T) { }} for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() diff --git a/pkg/storer/internal/reserve/items_test.go b/pkg/storer/internal/reserve/items_test.go index 0d82c926d62..19c22224d3a 100644 --- a/pkg/storer/internal/reserve/items_test.go +++ b/pkg/storer/internal/reserve/items_test.go @@ -130,8 +130,6 @@ func TestReserveItems(t *testing.T) { } for _, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) { t.Parallel() diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 277e6b7c125..d5d001e3440 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -49,7 +49,6 @@ func New( radiusSetter topology.SetStorageRadiuser, logger log.Logger, ) (*Reserve, error) { - rs := &Reserve{ baseAddr: baseAddr, st: st, @@ -99,7 +98,6 @@ func New( // 3. A new chunk that has the same address belonging to the same batch with an already stored chunk will overwrite the existing chunk // if the new chunk has a higher stamp timestamp (regardless of batch type and chunk type, eg CAC & SOC). func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { - // batchID lock, Put vs Eviction r.multx.Lock(string(chunk.Stamp().BatchID())) defer r.multx.Unlock(string(chunk.Stamp().BatchID())) @@ -128,7 +126,6 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { var shouldIncReserveSize, shouldDecrReserveSize bool err = r.st.Run(ctx, func(s transaction.Store) error { - oldStampIndex, loadedStampIndex, err := stampindex.LoadOrStore(s.IndexStore(), reserveScope, chunk) if err != nil { return fmt.Errorf("load or store stamp index for chunk %v has fail: %w", chunk, err) @@ -346,7 +343,6 @@ func (r *Reserve) EvictBatchBin( count int, bin uint8, ) (int, error) { - r.multx.Lock(string(batchID)) defer r.multx.Unlock(string(batchID)) @@ -427,7 +423,6 @@ func RemoveChunkWithItem( trx transaction.Store, item *BatchRadiusItem, ) error { - var errs error stamp, _ := chunkstamp.LoadWithBatchID(trx.IndexStore(), reserveScope, item.Address, item.BatchID) @@ -514,7 +509,6 @@ func (r *Reserve) IterateChunksItems(startBin uint8, cb func(*ChunkBinItem) (boo // Reset removes all the entires in the reserve. Must be done before any calls to the reserve. 
 func (r *Reserve) Reset(ctx context.Context) error {
-
 	size := r.Size()

 	// step 1: delete epoch timestamp
@@ -538,7 +532,6 @@ func (r *Reserve) Reset(ctx context.Context) error {
 		return err
 	}
 	for _, item := range bRitems {
-		item := item
 		eg.Go(func() error {
 			return r.st.Run(ctx, func(s transaction.Store) error {
 				return errors.Join(
@@ -568,7 +561,6 @@ func (r *Reserve) Reset(ctx context.Context) error {
 		return err
 	}
 	for _, item := range sitems {
-		item := item
 		eg.Go(func() error {
 			return r.st.Run(ctx, func(s transaction.Store) error {
 				return errors.Join(
diff --git a/pkg/storer/internal/stampindex/stampindex_test.go b/pkg/storer/internal/stampindex/stampindex_test.go
index a04ff2fc50c..98d8090c28f 100644
--- a/pkg/storer/internal/stampindex/stampindex_test.go
+++ b/pkg/storer/internal/stampindex/stampindex_test.go
@@ -87,8 +87,6 @@ func TestStampIndexItem(t *testing.T) {
 	}}

 	for _, tc := range tests {
-		tc := tc
-
 		t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
 			t.Parallel()

@@ -116,10 +114,8 @@ func TestStoreLoadDeleteWithStamp(t *testing.T) {
 		ns := fmt.Sprintf("namespace_%d", i)
 		t.Run(ns, func(t *testing.T) {
 			t.Run("store new stamp index", func(t *testing.T) {
-
 				err := ts.Run(context.Background(), func(s transaction.Store) error {
 					return stampindex.Store(s.IndexStore(), ns, chunk)
-
 				})
 				if err != nil {
 					t.Fatalf("Store(...): unexpected error: %v", err)
@@ -164,7 +160,6 @@ func TestStoreLoadDeleteWithStamp(t *testing.T) {
 			})

 			t.Run("delete stored stamp index", func(t *testing.T) {
-
 				err := ts.Run(context.Background(), func(s transaction.Store) error {
 					return stampindex.Delete(s.IndexStore(), ns, chunk.Stamp())
 				})
diff --git a/pkg/storer/internal/upload/uploadstore_test.go b/pkg/storer/internal/upload/uploadstore_test.go
index 4da6d014bb0..0c9bdb04fd1 100644
--- a/pkg/storer/internal/upload/uploadstore_test.go
+++ b/pkg/storer/internal/upload/uploadstore_test.go
@@ -118,8 +118,6 @@ func TestPushItem(t *testing.T) {
 	}}

 	for _, tc := range tests {
-		tc := tc
-
 		t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
 			t.Parallel()

@@ -192,8 +190,6 @@ func TestTagItem(t *testing.T) {
 	}}

 	for _, tc := range tests {
-		tc := tc
-
 		t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
 			t.Parallel()

@@ -307,8 +303,6 @@ func TestUploadItem(t *testing.T) {
 	}}

 	for _, tc := range tests {
-		tc := tc
-
 		t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
 			t.Parallel()

@@ -360,8 +354,6 @@ func TestItemNextTagID(t *testing.T) {
 	}}

 	for _, tc := range tests {
-		tc := tc
-
 		t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
 			t.Parallel()

@@ -410,8 +402,6 @@ func TestItemDirtyTagItem(t *testing.T) {
 	}}

 	for _, tc := range tests {
-		tc := tc
-
 		t.Run(fmt.Sprintf("%s marshal/unmarshal", tc.name), func(t *testing.T) {
 			t.Parallel()

@@ -620,7 +610,6 @@ func TestChunkPutter(t *testing.T) {
 	})

 	t.Run("restart putter", func(t *testing.T) {
-
 		var putter internal.PutterCloserWithReference

 		err = ts.Run(context.Background(), func(s transaction.Store) error {
@@ -692,7 +681,6 @@ func TestChunkReporter(t *testing.T) {
 	for idx, chunk := range chunktest.GenerateTestRandomChunks(10) {
 		t.Run(fmt.Sprintf("chunk %s", chunk.Address()), func(t *testing.T) {
-
 			if err := ts.Run(context.Background(), func(s transaction.Store) error {
 				return putter.Put(context.Background(), s, chunk)
 			}); err != nil {
diff --git a/pkg/storer/mock/mockreserve.go b/pkg/storer/mock/mockreserve.go
index 897403fe4ce..f8ff36a43f3 100644
--- a/pkg/storer/mock/mockreserve.go
+++ b/pkg/storer/mock/mockreserve.go
@@ -33,7 +33,6 @@ func WithSubscribeResp(chunks []*storer.BinC, err error) Option {
 func WithChunks(chs ...swarm.Chunk) Option {
 	return optionFunc(func(p *ReserveStore) {
 		for _, c := range chs {
-			c := c
 			if c.Stamp() != nil {
 				stampHash, _ := c.Stamp().Hash()
 				p.chunks[c.Address().String()+string(c.Stamp().BatchID())+string(stampHash)] = c
@@ -141,6 +140,7 @@ func (s *ReserveStore) StorageRadius() uint8 {
 	defer s.mtx.Unlock()
 	return s.radius
 }
+
 func (s *ReserveStore) SetStorageRadius(r uint8) {
 	s.mtx.Lock()
 	s.radius = r
@@ -199,7 +199,7 @@ func (s *ReserveStore) SetCalls() int {
 // Get chunks.
 func (s *ReserveStore) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (swarm.Chunk, error) {
 	if s.evilAddr.Equal(addr) {
-		//inject the malicious chunk instead
+		// inject the malicious chunk instead
 		return s.evilChunk, nil
 	}

@@ -227,7 +226,6 @@ func (s *ReserveStore) put(_ context.Context, chs ...swarm.Chunk) error {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()
 	for _, c := range chs {
-		c := c
 		if s.putHook != nil {
 			if err := s.putHook(c); err != nil {
 				return err
diff --git a/pkg/storer/mock/mockstorer_test.go b/pkg/storer/mock/mockstorer_test.go
index 6fbee300c24..484dac11262 100644
--- a/pkg/storer/mock/mockstorer_test.go
+++ b/pkg/storer/mock/mockstorer_test.go
@@ -83,7 +83,7 @@ func TestMockStorer(t *testing.T) {
 		want := storage.ErrNotFound
 		_, have := mockStorer.Session(1)
-		if !errors.Is(want, have) {
+		if !errors.Is(have, want) {
 			t.Fatalf("Session(): unexpected error: want %v have %v", want, have)
 		}
 	})
diff --git a/pkg/storer/uploadstore_test.go b/pkg/storer/uploadstore_test.go
index 532d6ce8ef0..095904ade7d 100644
--- a/pkg/storer/uploadstore_test.go
+++ b/pkg/storer/uploadstore_test.go
@@ -89,7 +89,6 @@ func testUploadStore(t *testing.T, newStorer func() (*storer.DB, error)) {
 			duplicate: true,
 		},
 	} {
-		tc := tc
 		testName := fmt.Sprintf("upload_%d_chunks", len(tc.chunks))
 		if tc.pin {
 			testName += "_with_pin"
@@ -436,7 +435,6 @@ func TestReporter(t *testing.T) {
 	t.Parallel()

 	testReporter(t, func() (*storer.DB, error) {
-
 		opts := dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)

 		return storer.New(context.Background(), "", opts)
diff --git a/pkg/swarm/swarm_test.go b/pkg/swarm/swarm_test.go
index 99bc243da0e..635392fcb62 100644
--- a/pkg/swarm/swarm_test.go
+++ b/pkg/swarm/swarm_test.go
@@ -54,7 +54,6 @@ func TestAddress(t *testing.T) {
 			want: swarm.NewAddress([]byte{0x35, 0xa2, 0x6b, 0x7b, 0xb6, 0x45, 0x5c, 0xba, 0xbe, 0x7a, 0xe, 0x5, 0xaa, 0xfb, 0xd0, 0xb8, 0xb2, 0x6f, 0xea, 0xc8, 0x43, 0xe3, 0xb9, 0xa6, 0x49, 0x46, 0x8d, 0xe, 0xa3, 0x7a, 0x12, 0xb2}),
 		},
 	} {
-		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()

@@ -184,7 +183,6 @@ func TestParseBitStr(t *testing.T) {
 			"011100000",
 		},
 	} {
-
 		if addr, err := swarm.ParseBitStrAddress(tc.bitStr); err != nil {
 			t.Fatal(err)
 		} else if got := swarm.Proximity(addr.Bytes(), tc.overlay.Bytes()); got < uint8(len(tc.bitStr)) {
diff --git a/pkg/topology/kademlia/mock/kademlia.go b/pkg/topology/kademlia/mock/kademlia.go
index cd115bf7186..3fd9362209e 100644
--- a/pkg/topology/kademlia/mock/kademlia.go
+++ b/pkg/topology/kademlia/mock/kademlia.go
@@ -21,10 +21,7 @@ type AddrTuple struct {

 func WithEachPeerRevCalls(addrs ...AddrTuple) Option {
 	return optionFunc(func(m *Mock) {
-		for _, a := range addrs {
-			a := a
-			m.eachPeerRev = append(m.eachPeerRev, a)
-		}
+		m.eachPeerRev = append(m.eachPeerRev, addrs...)
 	})
 }

@@ -90,10 +87,7 @@ func (m *Mock) SetStorageRadius(uint8) {
 func (m *Mock) AddRevPeers(addrs ...AddrTuple) {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
-	for _, a := range addrs {
-		a := a
-		m.eachPeerRev = append(m.eachPeerRev, a)
-	}
+	m.eachPeerRev = append(m.eachPeerRev, addrs...)
 }

 // EachConnectedPeer iterates from closest bin to farthest
diff --git a/pkg/topology/pslice/pslice_test.go b/pkg/topology/pslice/pslice_test.go
index f5cff17f7ae..f0f3ea250ab 100644
--- a/pkg/topology/pslice/pslice_test.go
+++ b/pkg/topology/pslice/pslice_test.go
@@ -92,7 +92,7 @@ func TestNoPanicOnEmptyRemove(t *testing.T) {
 	t.Parallel()
 	base := swarm.RandAddress(t)

-	var ps = pslice.New(4, base)
+	ps := pslice.New(4, base)

 	addr1 := swarm.RandAddressAt(t, base, 2)
 	addr2 := swarm.RandAddressAt(t, base, 2)
@@ -276,7 +276,6 @@ func TestBinPeers(t *testing.T) {
 			label: "full-bins",
 		},
 	} {
-		tc := tc
 		t.Run(tc.label, func(t *testing.T) {
 			t.Parallel()

@@ -314,7 +313,6 @@ func TestBinPeers(t *testing.T) {
 }

 func isEqual(a, b []swarm.Address) bool {
-
 	if len(a) != len(b) {
 		return false
 	}
@@ -359,7 +357,6 @@ func TestIteratorsJumpStop(t *testing.T) {
 	// // check that the stop functionality works correctly
 	testIterator(t, ps, true, true, 1, []swarm.Address{peers[9]})
 	testIteratorRev(t, ps, true, true, 1, []swarm.Address{peers[0]})
-
 }

 func testIteratorRev(t *testing.T, ps *pslice.PSlice, skipNext, stop bool, iterations int, peerseq []swarm.Address) {
diff --git a/pkg/traversal/traversal_test.go b/pkg/traversal/traversal_test.go
index c3524ebf81b..b2de3f2ae72 100644
--- a/pkg/traversal/traversal_test.go
+++ b/pkg/traversal/traversal_test.go
@@ -147,7 +147,6 @@ func TestTraversalBytes(t *testing.T) {
 	}

 	for _, tc := range testCases {
-		tc := tc
 		chunkCount := int(math.Ceil(float64(tc.dataSize) / swarm.ChunkSize))
 		t.Run(fmt.Sprintf("%d-chunk-%d-bytes", chunkCount, tc.dataSize), func(t *testing.T) {
 			t.Parallel()
@@ -242,7 +241,6 @@ func TestTraversalFiles(t *testing.T) {
 	}

 	for _, tc := range testCases {
-		tc := tc
 		chunkCount := int(math.Ceil(float64(tc.filesSize) / swarm.ChunkSize))
 		t.Run(fmt.Sprintf("%d-chunk-%d-bytes", chunkCount, tc.filesSize), func(t *testing.T) {
 			t.Parallel()
@@ -403,7 +401,6 @@ func TestTraversalManifest(t *testing.T) {
 	}

 	for _, tc := range testCases {
-		tc := tc
 		t.Run(fmt.Sprintf("%s-%d-files-%d-chunks", defaultMediaType, len(tc.files), tc.wantHashCount), func(t *testing.T) {
 			t.Parallel()