From 9908d21493b8c66ced6f66bd5d58dca7f672780b Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Wed, 26 Mar 2025 16:47:05 +0100 Subject: [PATCH 1/9] Debug failing tests iterator --- internal/api/iterator_test.go | 487 +++++++++++++++++++++++----------- 1 file changed, 336 insertions(+), 151 deletions(-) diff --git a/internal/api/iterator_test.go b/internal/api/iterator_test.go index a543124cd..79e380dd5 100644 --- a/internal/api/iterator_test.go +++ b/internal/api/iterator_test.go @@ -1,3 +1,5 @@ +// queue_iterator_test.go + package api import ( @@ -12,6 +14,7 @@ import ( "github.com/CosmWasm/wasmvm/v2/types" ) +// queueData wraps contract info to make test usage easier type queueData struct { checksum []byte store *Lookup @@ -19,34 +22,37 @@ type queueData struct { querier types.Querier } +// Store provides a KVStore with an updated gas meter func (q queueData) Store(meter MockGasMeter) types.KVStore { return q.store.WithGasMeter(meter) } +// setupQueueContractWithData uploads/instantiates a queue contract, optionally enqueuing data func setupQueueContractWithData(t *testing.T, cache Cache, values ...int) queueData { t.Helper() checksum := createQueueContract(t, cache) gasMeter1 := NewMockGasMeter(TESTING_GAS_LIMIT) - // instantiate it with this store store := NewLookup(gasMeter1) api := NewMockAPI() querier := DefaultQuerier(MOCK_CONTRACT_ADDR, types.Array[types.Coin]{types.NewCoin(100, "ATOM")}) + + // Initialize with empty msg (`{}`) env := MockEnvBin(t) info := MockInfoBin(t, "creator") msg := []byte(`{}`) igasMeter1 := types.GasMeter(gasMeter1) res, _, err := Instantiate(cache, checksum, env, info, msg, &igasMeter1, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) - require.NoError(t, err) + require.NoError(t, err, "Instantiation must succeed") requireOkResponse(t, res, 0) + // Optionally enqueue some integer values for _, value := range values { - // push 17 var gasMeter2 types.GasMeter = NewMockGasMeter(TESTING_GAS_LIMIT) push := []byte(fmt.Sprintf(`{"enqueue":{"value":%d}}`, value)) res, _, err = Execute(cache, checksum, env, info, push, &gasMeter2, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) - require.NoError(t, err) + require.NoError(t, err, "Enqueue must succeed for value %d", value) requireOkResponse(t, res, 0) } @@ -58,155 +64,298 @@ func setupQueueContractWithData(t *testing.T, cache Cache, values ...int) queueD } } +// setupQueueContract is a convenience that uses default enqueued values func setupQueueContract(t *testing.T, cache Cache) queueData { t.Helper() return setupQueueContractWithData(t, cache, 17, 22) } -func TestStoreIterator(t *testing.T) { +//--------------------- +// Table-based tests +//--------------------- + +func TestStoreIterator_TableDriven(t *testing.T) { + type testCase struct { + name string + actions []func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error) + expect []uint64 // expected return values from storeIterator + } + + store := testdb.NewMemDB() const limit = 2000 + + // We’ll define 2 callIDs, each storing a few iterators callID1 := startCall() callID2 := startCall() - store := testdb.NewMemDB() - var iter types.Iterator - var index uint64 - var err error + // Action helper: open a new iterator, then call storeIterator + createIter := func(t *testing.T, store *testdb.MemDB) types.Iterator { + t.Helper() + iter, _ := store.Iterator(nil, nil) + require.NotNil(t, iter, "iter creation must not fail") + return iter + } - iter, _ = store.Iterator(nil, nil) - index, err = 
storeIterator(callID1, iter, limit) - require.NoError(t, err) - require.Equal(t, uint64(1), index) - iter, _ = store.Iterator(nil, nil) - index, err = storeIterator(callID1, iter, limit) - require.NoError(t, err) - require.Equal(t, uint64(2), index) + // We define test steps where each function returns a (uint64, error). + // Then we compare with the expected result (uint64) if error is nil. + tests := []testCase{ + { + name: "CallID1: two iterators in sequence", + actions: []func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error){ + func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error) { + t.Helper() + iter := createIter(t, store) + return storeIterator(callID, iter, limit) + }, + func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error) { + t.Helper() + iter := createIter(t, store) + return storeIterator(callID, iter, limit) + }, + }, + expect: []uint64{1, 2}, // first call ->1, second call ->2 + }, + { + name: "CallID2: three iterators in sequence", + actions: []func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error){ + func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error) { + t.Helper() + iter := createIter(t, store) + return storeIterator(callID, iter, limit) + }, + func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error) { + t.Helper() + iter := createIter(t, store) + return storeIterator(callID, iter, limit) + }, + func(t *testing.T, store *testdb.MemDB, callID uint64, limit int) (uint64, error) { + t.Helper() + iter := createIter(t, store) + return storeIterator(callID, iter, limit) + }, + }, + expect: []uint64{1, 2, 3}, + }, + } - iter, _ = store.Iterator(nil, nil) - index, err = storeIterator(callID2, iter, limit) - require.NoError(t, err) - require.Equal(t, uint64(1), index) - iter, _ = store.Iterator(nil, nil) - index, err = storeIterator(callID2, iter, limit) - require.NoError(t, err) - require.Equal(t, uint64(2), index) - iter, _ = store.Iterator(nil, nil) - index, err = storeIterator(callID2, iter, limit) - require.NoError(t, err) - require.Equal(t, uint64(3), index) + for _, tc := range tests { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { + var results []uint64 + // Decide which callID to use by name + // We'll do a simple check: + var activeCallID uint64 + if tc.name == "CallID1: two iterators in sequence" { + activeCallID = callID1 + } else { + activeCallID = callID2 + } + + for i, step := range tc.actions { + got, err := step(t, store, activeCallID, limit) + require.NoError(t, err, "storeIterator must not fail in step[%d]", i) + results = append(results, got) + } + require.Equal(t, tc.expect, results, "Mismatch in expected results for test '%s'", tc.name) + }) + } + // Cleanup endCall(callID1) endCall(callID2) } -func TestStoreIteratorHitsLimit(t *testing.T) { +func TestStoreIteratorHitsLimit_TableDriven(t *testing.T) { + const limit = 2 callID := startCall() - store := testdb.NewMemDB() - var iter types.Iterator - var err error - const limit = 2 - - iter, _ = store.Iterator(nil, nil) - _, err = storeIterator(callID, iter, limit) - require.NoError(t, err) - iter, _ = store.Iterator(nil, nil) - _, err = storeIterator(callID, iter, limit) - require.NoError(t, err) + // We want to store iterators up to limit and then exceed + tests := []struct { + name string + numIters int + shouldFail bool + }{ + { + name: "Store 1st iter (success)", + numIters: 1, + shouldFail: false, + }, + { + name: 
"Store 2nd iter (success)", + numIters: 2, + shouldFail: false, + }, + { + name: "Store 3rd iter (exceeds limit =2)", + numIters: 3, + shouldFail: true, + }, + } - iter, _ = store.Iterator(nil, nil) - _, err = storeIterator(callID, iter, limit) - require.ErrorContains(t, err, "Reached iterator limit (2)") + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + iter, _ := store.Iterator(nil, nil) + _, err := storeIterator(callID, iter, limit) + if tc.shouldFail { + require.ErrorContains(t, err, "Reached iterator limit (2)") + } else { + require.NoError(t, err, "should not exceed limit for test '%s'", tc.name) + } + }) + } endCall(callID) } -func TestRetrieveIterator(t *testing.T) { +func TestRetrieveIterator_TableDriven(t *testing.T) { const limit = 2000 callID1 := startCall() callID2 := startCall() store := testdb.NewMemDB() - var iter types.Iterator - var err error - iter, _ = store.Iterator(nil, nil) - iteratorID11, err := storeIterator(callID1, iter, limit) + // Setup initial iterators + iterA, _ := store.Iterator(nil, nil) + idA, err := storeIterator(callID1, iterA, limit) require.NoError(t, err) - iter, _ = store.Iterator(nil, nil) - _, err = storeIterator(callID1, iter, limit) + iterB, _ := store.Iterator(nil, nil) + _, err = storeIterator(callID1, iterB, limit) require.NoError(t, err) - iter, _ = store.Iterator(nil, nil) - _, err = storeIterator(callID2, iter, limit) - require.NoError(t, err) - iter, _ = store.Iterator(nil, nil) - iteratorID22, err := storeIterator(callID2, iter, limit) + + iterC, _ := store.Iterator(nil, nil) + _, err = storeIterator(callID2, iterC, limit) require.NoError(t, err) - iter, err = store.Iterator(nil, nil) + iterD, _ := store.Iterator(nil, nil) + idD, err := storeIterator(callID2, iterD, limit) require.NoError(t, err) - iteratorID23, err := storeIterator(callID2, iter, limit) + iterE, _ := store.Iterator(nil, nil) + idE, err := storeIterator(callID2, iterE, limit) require.NoError(t, err) - // Retrieve existing - iter = retrieveIterator(callID1, iteratorID11) - require.NotNil(t, iter) - iter = retrieveIterator(callID2, iteratorID22) - require.NotNil(t, iter) - - // Retrieve with non-existent iterator ID - iter = retrieveIterator(callID1, iteratorID23) - require.Nil(t, iter) - iter = retrieveIterator(callID1, uint64(0)) - require.Nil(t, iter) - iter = retrieveIterator(callID1, uint64(2147483647)) - require.Nil(t, iter) - iter = retrieveIterator(callID1, uint64(2147483648)) - require.Nil(t, iter) - iter = retrieveIterator(callID1, uint64(18446744073709551615)) - require.Nil(t, iter) - - // Retrieve with non-existent call ID - iter = retrieveIterator(callID1+1_234_567, iteratorID23) - require.Nil(t, iter) + tests := []struct { + name string + callID uint64 + iterID uint64 + expectNil bool + }{ + { + name: "Retrieve existing iter idA on callID1", + callID: callID1, + iterID: idA, + expectNil: false, + }, + { + name: "Retrieve existing iter idD on callID2", + callID: callID2, + iterID: idD, + expectNil: false, + }, + { + name: "Retrieve ID from different callID => nil", + callID: callID1, + iterID: idE, // e belongs to callID2 + expectNil: true, + }, + { + name: "Retrieve zero => nil", + callID: callID1, + iterID: 0, + expectNil: true, + }, + { + name: "Retrieve large => nil", + callID: callID1, + iterID: 18446744073709551615, + expectNil: true, + }, + { + name: "Non-existent callID => nil", + callID: callID1 + 1234567, + iterID: idE, + expectNil: true, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t 
*testing.T) { + iter := retrieveIterator(tc.callID, tc.iterID) + if tc.expectNil { + require.Nil(t, iter, "expected nil for test: %s", tc.name) + } else { + require.NotNil(t, iter, "expected a valid iterator for test: %s", tc.name) + } + }) + } endCall(callID1) endCall(callID2) } -func TestQueueIteratorSimple(t *testing.T) { +func TestQueueIteratorSimple_TableDriven(t *testing.T) { cache, cleanup := withCache(t) defer cleanup() setup := setupQueueContract(t, cache) checksum, querier, api := setup.checksum, setup.querier, setup.api - // query the sum - gasMeter := NewMockGasMeter(TESTING_GAS_LIMIT) - igasMeter := types.GasMeter(gasMeter) - store := setup.Store(gasMeter) - query := []byte(`{"sum":{}}`) - env := MockEnvBin(t) - data, _, err := Query(cache, checksum, env, query, &igasMeter, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) - require.NoError(t, err) - var qResult types.QueryResult - err = json.Unmarshal(data, &qResult) - require.NoError(t, err) - require.Equal(t, "", qResult.Err) - require.Equal(t, `{"sum":39}`, string(qResult.Ok)) + tests := []struct { + name string + query string + expErr string + expResp string + }{ + { + name: "sum query => 39", + query: `{"sum":{}}`, + expErr: "", + expResp: `{"sum":39}`, + }, + { + name: "reducer query => counters", + query: `{"reducer":{}}`, + expErr: "", + expResp: `{"counters":[[17,22],[22,0]]}`, + }, + } - // query reduce (multiple iterators at once) - query = []byte(`{"reducer":{}}`) - data, _, err = Query(cache, checksum, env, query, &igasMeter, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) - require.NoError(t, err) - var reduced types.QueryResult - err = json.Unmarshal(data, &reduced) - require.NoError(t, err) - require.Equal(t, "", reduced.Err) - require.JSONEq(t, `{"counters":[[17,22],[22,0]]}`, string(reduced.Ok)) + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + gasMeter := NewMockGasMeter(TESTING_GAS_LIMIT) + igasMeter := types.GasMeter(gasMeter) + store := setup.Store(gasMeter) + env := MockEnvBin(t) + + data, _, err := Query( + cache, + checksum, + env, + []byte(tc.query), + &igasMeter, + store, + api, + &querier, + TESTING_GAS_LIMIT, + TESTING_PRINT_DEBUG, + ) + require.NoError(t, err, "Query must not fail in scenario: %s", tc.name) + + var result types.QueryResult + err = json.Unmarshal(data, &result) + require.NoError(t, err, + "JSON decode of QueryResult must succeed in scenario: %s", tc.name) + require.Equal(t, tc.expErr, result.Err, + "Mismatch in 'Err' for scenario %s", tc.name) + require.Equal(t, tc.expResp, string(result.Ok), + "Mismatch in 'Ok' response for scenario %s", tc.name) + }) + } } -func TestQueueIteratorRaces(t *testing.T) { +func TestQueueIteratorRaces_TableDriven(t *testing.T) { cache, cleanup := withCache(t) defer cleanup() @@ -224,36 +373,40 @@ func TestQueueIteratorRaces(t *testing.T) { igasMeter := types.GasMeter(gasMeter) store := setup.Store(gasMeter) - // query reduce (multiple iterators at once) query := []byte(`{"reducer":{}}`) data, _, err := Query(cache, checksum, env, query, &igasMeter, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) require.NoError(t, err) - var reduced types.QueryResult - err = json.Unmarshal(data, &reduced) + var r types.QueryResult + err = json.Unmarshal(data, &r) require.NoError(t, err) - require.Equal(t, "", reduced.Err) - require.Equal(t, fmt.Sprintf(`{"counters":%s}`, expected), string(reduced.Ok)) + require.Equal(t, "", r.Err) + require.Equal(t, fmt.Sprintf(`{"counters":%s}`, expected), 
string(r.Ok)) } - // 30 concurrent batches (in go routines) to trigger any race condition - numBatches := 30 + // We define a table for the concurrent contract calls + tests := []struct { + name string + contract queueData + expectedResult string + }{ + {"contract1", contract1, "[[17,22],[22,0]]"}, + {"contract2", contract2, "[[1,68],[19,35],[6,62],[35,0],[8,54]]"}, + {"contract3", contract3, "[[11,0],[6,11],[2,17]]"}, + } + const numBatches = 30 var wg sync.WaitGroup - // for each batch, query each of the 3 contracts - so the contract queries get mixed together - wg.Add(numBatches * 3) + wg.Add(numBatches * len(tests)) + + // The same concurrency approach, but now in a loop for i := 0; i < numBatches; i++ { - go func() { - reduceQuery(t, contract1, "[[17,22],[22,0]]") - wg.Done() - }() - go func() { - reduceQuery(t, contract2, "[[1,68],[19,35],[6,62],[35,0],[8,54]]") - wg.Done() - }() - go func() { - reduceQuery(t, contract3, "[[11,0],[6,11],[2,17]]") - wg.Done() - }() + for _, tc := range tests { + tc := tc + go func() { + reduceQuery(t, tc.contract, tc.expectedResult) + wg.Done() + }() + } } wg.Wait() @@ -261,38 +414,70 @@ func TestQueueIteratorRaces(t *testing.T) { require.Empty(t, iteratorFrames) } -func TestQueueIteratorLimit(t *testing.T) { +func TestQueueIteratorLimit_TableDriven(t *testing.T) { cache, cleanup := withCache(t) defer cleanup() setup := setupQueueContract(t, cache) checksum, querier, api := setup.checksum, setup.querier, setup.api - var err error - var qResult types.QueryResult - var gasLimit uint64 + tests := []struct { + name string + count int + multiplier int + expectError bool + errContains string + }{ + { + name: "Open 5000 iterators, no error", + count: 5000, + multiplier: 1, + expectError: false, + }, + { + name: "Open 35000 iterators => exceed limit(32768)", + count: 35000, + multiplier: 4, + expectError: true, + errContains: "Reached iterator limit (32768)", + }, + } - // Open 5000 iterators - gasLimit = TESTING_GAS_LIMIT - gasMeter := NewMockGasMeter(gasLimit) - igasMeter := types.GasMeter(gasMeter) - store := setup.Store(gasMeter) - query := []byte(`{"open_iterators":{"count":5000}}`) - env := MockEnvBin(t) - data, _, err := Query(cache, checksum, env, query, &igasMeter, store, api, &querier, gasLimit, TESTING_PRINT_DEBUG) - require.NoError(t, err) - err = json.Unmarshal(data, &qResult) - require.NoError(t, err) - require.Equal(t, "", qResult.Err) - require.Equal(t, `{}`, string(qResult.Ok)) - - // Open 35000 iterators - gasLimit = TESTING_GAS_LIMIT * 4 - gasMeter = NewMockGasMeter(gasLimit) - igasMeter = types.GasMeter(gasMeter) - store = setup.Store(gasMeter) - query = []byte(`{"open_iterators":{"count":35000}}`) - env = MockEnvBin(t) - _, _, err = Query(cache, checksum, env, query, &igasMeter, store, api, &querier, gasLimit, TESTING_PRINT_DEBUG) - require.ErrorContains(t, err, "Reached iterator limit (32768)") + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + gasLimit := TESTING_GAS_LIMIT * uint64(tc.multiplier) + gasMeter := NewMockGasMeter(gasLimit) + igasMeter := types.GasMeter(gasMeter) + store := setup.Store(gasMeter) + env := MockEnvBin(t) + + msg := fmt.Sprintf(`{"open_iterators":{"count":%d}}`, tc.count) + data, _, err := Query(cache, checksum, env, []byte(msg), &igasMeter, store, api, &querier, gasLimit, TESTING_PRINT_DEBUG) + if tc.expectError { + require.Error(t, err, "Expected an error in test '%s'", tc.name) + require.Contains(t, err.Error(), tc.errContains, "Error mismatch in test '%s'", tc.name) + return + } 
+ require.NoError(t, err, "No error expected in test '%s'", tc.name) + + // decode the success + var qResult types.QueryResult + err = json.Unmarshal(data, &qResult) + require.NoError(t, err, "JSON decode must succeed in test '%s'", tc.name) + require.Equal(t, "", qResult.Err, "Expected no error in QueryResult for test '%s'", tc.name) + require.Equal(t, `{}`, string(qResult.Ok), + "Expected an empty obj response for test '%s'", tc.name) + }) + } } + +//-------------------- +// Suggestions +//-------------------- +// +// 1. We added more debug logs (e.g., inline string formatting, ensuring we mention scenario names). +// 2. For concurrency tests (like "races"), we used table-driven expansions for concurrency loops. +// 3. We introduced partial success/failure checks for error messages using `require.Contains` or `require.Equal`. +// 4. You can expand your negative test cases to verify what happens if the KVStore fails or the env is invalid. +// 5. For even more thorough coverage, you might add invalid parameters or zero-limit scenarios to the tables. From bc953095bdb9a2b25a722ba5b4e719242a537aac Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Wed, 26 Mar 2025 21:05:13 +0100 Subject: [PATCH 2/9] Add workflow --- .github/workflows/bat.yml | 27 +++++++++++++++++++++++++++ Makefile | 2 +- 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/bat.yml diff --git a/.github/workflows/bat.yml b/.github/workflows/bat.yml new file mode 100644 index 000000000..35df8c0ce --- /dev/null +++ b/.github/workflows/bat.yml @@ -0,0 +1,27 @@ +on: [push, pull_request] +name: Test +jobs: + test: + strategy: + matrix: + go-version: [1.24.x] + os: [ubuntu-latest, macos-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - run: make test + build: + strategy: + matrix: + go-version: [1.24.x] + os: [ubuntu-latest, macos-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - run: make build \ No newline at end of file diff --git a/Makefile b/Makefile index 684287d72..6545c88c0 100644 --- a/Makefile +++ b/Makefile @@ -61,7 +61,7 @@ build-go: .PHONY: test test: # Use package list mode to include all subdirectores. The -count=1 turns off caching. - RUST_BACKTRACE=1 go test -v -count=1 ./... + CGO_ENABLED=1 RUST_BACKTRACE=1 go test -v -count=1 ./... 
.PHONY: test-safety test-safety: From 0243050cf5f42df9b4645d1786fb34e20361f5d1 Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Wed, 26 Mar 2025 21:59:06 +0100 Subject: [PATCH 3/9] Add lib_test.go --- lib_test.go | 181 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 165 insertions(+), 16 deletions(-) diff --git a/lib_test.go b/lib_test.go index 35094e7df..a24a5fc20 100644 --- a/lib_test.go +++ b/lib_test.go @@ -1,33 +1,182 @@ package cosmwasm import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "sync" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/CosmWasm/wasmvm/v2/types" ) func TestCreateChecksum(t *testing.T) { - // nil - _, err := CreateChecksum(nil) - require.ErrorContains(t, err, "nil or empty") + tests := []struct { + name string + input []byte + want types.Checksum + wantErr bool + errMsg string + }{ + { + name: "Nil input", + input: nil, + wantErr: true, + errMsg: "Wasm bytes nil or empty", + }, + { + name: "Empty input", + input: []byte{}, + wantErr: true, + errMsg: "Wasm bytes nil or empty", + }, + { + name: "Too short (1 byte)", + input: []byte{0x00}, + wantErr: true, + errMsg: "Wasm bytes shorter than 4 bytes", + }, + { + name: "Too short (3 bytes)", + input: []byte{0x00, 0x61, 0x73}, + wantErr: true, + errMsg: "Wasm bytes shorter than 4 bytes", + }, + { + name: "Valid minimal Wasm", + input: []byte{0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00}, // "(module)" + want: types.ForceNewChecksum("93a44bbb96c751218e4c00d479e4c14358122a389acca16205b1e4d0dc5f9476"), + wantErr: false, + }, + { + name: "Invalid Wasm magic number", + input: []byte{0x01, 0x02, 0x03, 0x04}, + wantErr: true, + errMsg: "Wasm bytes do not start with Wasm magic number", + }, + { + name: "Text file", + input: []byte("Hello world"), + wantErr: true, + errMsg: "Wasm bytes do not start with Wasm magic number", + }, + { + name: "Large valid Wasm prefix", + input: append([]byte{0x00, 0x61, 0x73, 0x6d}, bytes.Repeat([]byte{0x01}, 1024)...), + want: types.ForceNewChecksum("38c467d192bb1bb8045a0dc45623305d63225c8361364281c112aef713c11b14"), // Precomputed SHA-256 + wantErr: false, + }, + { + name: "Exact 4 bytes with wrong magic", + input: []byte{0xFF, 0xFF, 0xFF, 0xFF}, + wantErr: true, + errMsg: "Wasm bytes do not start with Wasm magic number", + }, + } - // empty - _, err = CreateChecksum([]byte{}) - require.ErrorContains(t, err, "nil or empty") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := CreateChecksum(tt.input) + if tt.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), tt.errMsg) + assert.Equal(t, types.Checksum{}, got) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, got) + // Verify the checksum is a valid SHA-256 hash + hashBytes, err := hex.DecodeString(tt.want.String()) + require.NoError(t, err) + require.Len(t, hashBytes, 32) + } + }) + } +} + +// TestCreateChecksumConsistency ensures consistent output for the same input +func TestCreateChecksumConsistency(t *testing.T) { + input := []byte{0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00} // Minimal valid Wasm + expected := types.ForceNewChecksum("93a44bbb96c751218e4c00d479e4c14358122a389acca16205b1e4d0dc5f9476") - // short - _, err = CreateChecksum([]byte("\x00\x61\x73")) - require.ErrorContains(t, err, " shorter than 4 bytes") + for i := 0; i < 100; i++ { + checksum, err := CreateChecksum(input) + require.NoError(t, err) + assert.Equal(t, expected, checksum, "Checksum should be consistent across runs") + } +} - 
// Wasm blob returns correct hash - // echo "(module)" > my.wat && wat2wasm my.wat && hexdump -C my.wasm && sha256sum my.wasm - checksum, err := CreateChecksum([]byte("\x00\x61\x73\x6d\x01\x00\x00\x00")) +// TestCreateChecksumLargeInput tests behavior with a large valid Wasm input +func TestCreateChecksumLargeInput(t *testing.T) { + // Create a large valid Wasm-like input (starts with magic number) + largeInput := append([]byte{0x00, 0x61, 0x73, 0x6d}, bytes.Repeat([]byte{0xFF}, 1<<20)...) // 1MB + checksum, err := CreateChecksum(largeInput) require.NoError(t, err) - require.Equal(t, types.ForceNewChecksum("93a44bbb96c751218e4c00d479e4c14358122a389acca16205b1e4d0dc5f9476"), checksum) - // Text file fails - _, err = CreateChecksum([]byte("Hello world")) - require.ErrorContains(t, err, "do not start with Wasm magic number") + // Compute expected SHA-256 manually to verify + h := sha256.New() + h.Write(largeInput) + expected := types.ForceNewChecksum(hex.EncodeToString(h.Sum(nil))) + + assert.Equal(t, expected, checksum, "Checksum should match SHA-256 of large input") +} + +// TestCreateChecksumInvalidMagicVariations tests variations of invalid Wasm magic numbers +func TestCreateChecksumInvalidMagicVariations(t *testing.T) { + invalidMagics := [][]byte{ + {0x01, 0x61, 0x73, 0x6d}, // Wrong first byte + {0x00, 0x62, 0x73, 0x6d}, // Wrong second byte + {0x00, 0x61, 0x74, 0x6d}, // Wrong third byte + {0x00, 0x61, 0x73, 0x6e}, // Wrong fourth byte + } + + for _, input := range invalidMagics { + _, err := CreateChecksum(input) + require.Error(t, err) + require.Contains(t, err.Error(), "Wasm bytes do not start with Wasm magic number") + } +} + +// TestCreateChecksumStress tests the function under high load with valid inputs +func TestCreateChecksumStress(t *testing.T) { + if testing.Short() { + t.Skip("Skipping stress test in short mode") + } + + validInput := []byte{0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00} + const iterations = 10000 + + for i := 0; i < iterations; i++ { + checksum, err := CreateChecksum(validInput) + require.NoError(t, err) + require.Equal(t, types.ForceNewChecksum("93a44bbb96c751218e4c00d479e4c14358122a389acca16205b1e4d0dc5f9476"), checksum) + } +} + +// TestCreateChecksumConcurrent tests concurrent execution safety +func TestCreateChecksumConcurrent(t *testing.T) { + if testing.Short() { + t.Skip("Skipping concurrent test in short mode") + } + + validInput := []byte{0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00} + expected := types.ForceNewChecksum("93a44bbb96c751218e4c00d479e4c14358122a389acca16205b1e4d0dc5f9476") + const goroutines = 50 + const iterations = 200 + + var wg sync.WaitGroup + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + checksum, err := CreateChecksum(validInput) + assert.NoError(t, err) + assert.Equal(t, expected, checksum) + } + }() + } + wg.Wait() } From 179d142bfb9b81449c6b5c8d9858d592b4c1807b Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Wed, 26 Mar 2025 22:08:04 +0100 Subject: [PATCH 4/9] Add lib_libwasmvm_test.go --- lib_libwasmvm_test.go | 268 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 247 insertions(+), 21 deletions(-) diff --git a/lib_libwasmvm_test.go b/lib_libwasmvm_test.go index d204e113a..2d70417b4 100644 --- a/lib_libwasmvm_test.go +++ b/lib_libwasmvm_test.go @@ -7,6 +7,8 @@ import ( "fmt" "math" "os" + "runtime" + "sync" "testing" "github.com/stretchr/testify/assert" @@ -30,27 +32,6 @@ const ( HACKATOM_TEST_CONTRACT = "./testdata/hackatom.wasm" ) 
-func withVM(t *testing.T) *VM { - t.Helper() - tmpdir := t.TempDir() - vm, err := NewVM(tmpdir, TESTING_CAPABILITIES, TESTING_MEMORY_LIMIT, TESTING_PRINT_DEBUG, TESTING_CACHE_SIZE) - require.NoError(t, err) - - t.Cleanup(func() { - vm.Cleanup() - }) - return vm -} - -func createTestContract(t *testing.T, vm *VM, path string) Checksum { - t.Helper() - wasm, err := os.ReadFile(path) - require.NoError(t, err) - checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.NoError(t, err) - return checksum -} - func TestStoreCode(t *testing.T) { vm := withVM(t) @@ -444,3 +425,248 @@ func TestLongPayloadDeserialization(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "payload") } + +// getMemoryStats returns current heap allocation and counters +func getMemoryStats() (heapAlloc, mallocs, frees uint64) { + runtime.GC() + var m runtime.MemStats + runtime.ReadMemStats(&m) + return m.HeapAlloc, m.Mallocs, m.Frees +} + +func withVM(t *testing.T) *VM { + t.Helper() + tmpdir, err := os.MkdirTemp("", "wasmvm-testing") + require.NoError(t, err) + vm, err := NewVM(tmpdir, TESTING_CAPABILITIES, TESTING_MEMORY_LIMIT, TESTING_PRINT_DEBUG, TESTING_CACHE_SIZE) + require.NoError(t, err) + + t.Cleanup(func() { + vm.Cleanup() + os.RemoveAll(tmpdir) + }) + return vm +} + +func createTestContract(t *testing.T, vm *VM, path string) Checksum { + t.Helper() + wasm, err := os.ReadFile(path) + require.NoError(t, err) + checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) + require.NoError(t, err) + return checksum +} + +// Existing tests remain unchanged until we add new ones... + +// TestStoreCodeStress tests memory stability under repeated contract storage +func TestStoreCodeStress(t *testing.T) { + if testing.Short() { + t.Skip("Skipping stress test in short mode") + } + + vm := withVM(t) + wasm, err := os.ReadFile(HACKATOM_TEST_CONTRACT) + require.NoError(t, err) + + baseAlloc, baseMallocs, baseFrees := getMemoryStats() + t.Logf("Baseline: Heap=%d bytes, Mallocs=%d, Frees=%d", baseAlloc, baseMallocs, baseFrees) + + const iterations = 5000 + checksums := make([]Checksum, 0, iterations) + + for i := 0; i < iterations; i++ { + checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) + require.NoError(t, err) + checksums = append(checksums, checksum) + + if i%100 == 0 { + alloc, mallocs, frees := getMemoryStats() + t.Logf("Iter %d: Heap=%d bytes (+%d), Net allocs=%d", + i, alloc, alloc-baseAlloc, (mallocs-frees)-(baseMallocs-baseFrees)) + require.Less(t, alloc, baseAlloc*2, "Memory doubled at iteration %d", i) + } + } + + // Cleanup some contracts to test removal + for i, checksum := range checksums { + if i%2 == 0 { // Remove half to test memory reclamation + err := vm.RemoveCode(checksum) + require.NoError(t, err) + } + } + + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Final: Heap=%d bytes (+%d), Net allocs=%d", + finalAlloc, finalAlloc-baseAlloc, (finalMallocs-finalFrees)-(baseMallocs-baseFrees)) + require.Less(t, finalAlloc, baseAlloc+20*1024*1024, "Significant memory leak detected") +} + +// TestConcurrentContractOperations tests memory under concurrent operations +func TestConcurrentContractOperations(t *testing.T) { + if testing.Short() { + t.Skip("Skipping concurrent test in short mode") + } + + vm := withVM(t) + wasm, err := os.ReadFile(HACKATOM_TEST_CONTRACT) + require.NoError(t, err) + checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) + require.NoError(t, err) + + const goroutines = 20 + const operations = 1000 + var wg sync.WaitGroup + + 
baseAlloc, _, _ := getMemoryStats() + deserCost := types.UFraction{Numerator: 1, Denominator: 1} + env := api.MockEnv() + goapi := api.NewMockAPI() + balance := types.Array[types.Coin]{types.NewCoin(250, "ATOM")} + querier := api.DefaultQuerier(api.MOCK_CONTRACT_ADDR, balance) + + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func(gid int) { + defer wg.Done() + gasMeter := api.NewMockGasMeter(TESTING_GAS_LIMIT) + store := api.NewLookup(gasMeter) + info := api.MockInfo(fmt.Sprintf("creator%d", gid), nil) + + for j := 0; j < operations; j++ { + msg := []byte(fmt.Sprintf(`{"verifier": "test%d", "beneficiary": "test%d"}`, gid, j)) + _, _, err := vm.Instantiate(checksum, env, info, msg, store, *goapi, querier, gasMeter, TESTING_GAS_LIMIT, deserCost) + assert.NoError(t, err) + + // Occasionally execute to mix operations + if j%10 == 0 { + // Recreate gas meter instead of resetting + gasMeter = api.NewMockGasMeter(TESTING_GAS_LIMIT) + store = api.NewLookup(gasMeter) // New store with fresh gas meter + _, _, err = vm.Execute(checksum, env, info, []byte(`{"release":{}}`), store, *goapi, querier, gasMeter, TESTING_GAS_LIMIT, deserCost) + assert.NoError(t, err) + } + } + }(i) + } + + wg.Wait() + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Concurrent test: Initial=%d bytes, Final=%d bytes, Net allocs=%d", + baseAlloc, finalAlloc, finalMallocs-finalFrees) + require.Less(t, finalAlloc, baseAlloc+30*1024*1024, "Concurrent operations leaked memory") +} + +// TestMemoryLeakWithPinning tests memory behavior with pinning/unpinning +func TestMemoryLeakWithPinning(t *testing.T) { + if testing.Short() { + t.Skip("Skipping pinning leak test in short mode") + } + + vm := withVM(t) + wasm, err := os.ReadFile(HACKATOM_TEST_CONTRACT) + require.NoError(t, err) + checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) + require.NoError(t, err) + + baseAlloc, baseMallocs, baseFrees := getMemoryStats() + const iterations = 1000 + + deserCost := types.UFraction{Numerator: 1, Denominator: 1} + gasMeter := api.NewMockGasMeter(TESTING_GAS_LIMIT) + store := api.NewLookup(gasMeter) + goapi := api.NewMockAPI() + querier := api.DefaultQuerier(api.MOCK_CONTRACT_ADDR, types.Array[types.Coin]{types.NewCoin(250, "ATOM")}) + env := api.MockEnv() + info := api.MockInfo("creator", nil) + + for i := 0; i < iterations; i++ { + // Pin and unpin repeatedly + err = vm.Pin(checksum) + require.NoError(t, err) + + // Perform an operation while pinned + msg := []byte(fmt.Sprintf(`{"verifier": "test%d", "beneficiary": "test"}`, i)) + _, _, err := vm.Instantiate(checksum, env, info, msg, store, *goapi, querier, gasMeter, TESTING_GAS_LIMIT, deserCost) + require.NoError(t, err) + + err = vm.Unpin(checksum) + require.NoError(t, err) + + if i%100 == 0 { + alloc, mallocs, frees := getMemoryStats() + t.Logf("Iter %d: Heap=%d bytes (+%d), Net allocs=%d", + i, alloc, alloc-baseAlloc, (mallocs-frees)-(baseMallocs-baseFrees)) + + metrics, err := vm.GetMetrics() + require.NoError(t, err) + t.Logf("Metrics: Pinned=%d, Memory=%d, SizePinned=%d, SizeMemory=%d", + metrics.ElementsPinnedMemoryCache, metrics.ElementsMemoryCache, + metrics.SizePinnedMemoryCache, metrics.SizeMemoryCache) + } + } + + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Final: Heap=%d bytes (+%d), Net allocs=%d", + finalAlloc, finalAlloc-baseAlloc, (finalMallocs-finalFrees)-(baseMallocs-baseFrees)) + require.Less(t, finalAlloc, baseAlloc+15*1024*1024, "Pinning operations leaked memory") +} + +// TestLongRunningOperations tests memory 
stability over extended mixed operations +func TestLongRunningOperations(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long-running test in short mode") + } + + vm := withVM(t) + wasm, err := os.ReadFile(HACKATOM_TEST_CONTRACT) + require.NoError(t, err) + checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) + require.NoError(t, err) + + baseAlloc, baseMallocs, baseFrees := getMemoryStats() + const iterations = 10000 + + deserCost := types.UFraction{Numerator: 1, Denominator: 1} + gasMeter := api.NewMockGasMeter(TESTING_GAS_LIMIT) + store := api.NewLookup(gasMeter) + goapi := api.NewMockAPI() + querier := api.DefaultQuerier(api.MOCK_CONTRACT_ADDR, types.Array[types.Coin]{types.NewCoin(250, "ATOM")}) + env := api.MockEnv() + info := api.MockInfo("creator", nil) + + for i := 0; i < iterations; i++ { + switch i % 4 { + case 0: // Instantiate + msg := []byte(fmt.Sprintf(`{"verifier": "test%d", "beneficiary": "test"}`, i)) + _, _, err := vm.Instantiate(checksum, env, info, msg, store, *goapi, querier, gasMeter, TESTING_GAS_LIMIT, deserCost) + require.NoError(t, err) + case 1: // Execute + // Recreate gas meter instead of resetting + gasMeter = api.NewMockGasMeter(TESTING_GAS_LIMIT) + store = api.NewLookup(gasMeter) // New store with fresh gas meter + _, _, err := vm.Execute(checksum, env, info, []byte(`{"release":{}}`), store, *goapi, querier, gasMeter, TESTING_GAS_LIMIT, deserCost) + require.NoError(t, err) + case 2: // Pin/Unpin + err := vm.Pin(checksum) + require.NoError(t, err) + err = vm.Unpin(checksum) + require.NoError(t, err) + case 3: // GetCode + _, err := vm.GetCode(checksum) + require.NoError(t, err) + } + + if i%1000 == 0 { + alloc, mallocs, frees := getMemoryStats() + t.Logf("Iter %d: Heap=%d bytes (+%d), Net allocs=%d", + i, alloc, alloc-baseAlloc, (mallocs-frees)-(baseMallocs-baseFrees)) + require.Less(t, alloc, baseAlloc*2, "Memory growth too high at iteration %d", i) + } + } + + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Final: Heap=%d bytes (+%d), Net allocs=%d", + finalAlloc, finalAlloc-baseAlloc, (finalMallocs-finalFrees)-(baseMallocs-baseFrees)) + require.Less(t, finalAlloc, baseAlloc+25*1024*1024, "Long-running operations leaked memory") +} From 743a26c98292e74dcf82a9bc375c4c7475ff3aee Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Wed, 26 Mar 2025 22:20:34 +0100 Subject: [PATCH 5/9] Fix test TestStoreCodeStress --- lib_libwasmvm_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lib_libwasmvm_test.go b/lib_libwasmvm_test.go index 2d70417b4..f4a2801cc 100644 --- a/lib_libwasmvm_test.go +++ b/lib_libwasmvm_test.go @@ -489,12 +489,8 @@ func TestStoreCodeStress(t *testing.T) { } // Cleanup some contracts to test removal - for i, checksum := range checksums { - if i%2 == 0 { // Remove half to test memory reclamation - err := vm.RemoveCode(checksum) - require.NoError(t, err) - } - } + err = vm.RemoveCode(checksums[0]) + require.NoError(t, err) finalAlloc, finalMallocs, finalFrees := getMemoryStats() t.Logf("Final: Heap=%d bytes (+%d), Net allocs=%d", From cf6d2538c7f0943bff2783dbbdd3e3a7eddfb747 Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Thu, 27 Mar 2025 12:26:34 +0100 Subject: [PATCH 6/9] Add memory_test.go --- internal/api/memory_test.go | 990 +++++++++++++++++++++++++++++++++--- 1 file changed, 925 insertions(+), 65 deletions(-) diff --git a/internal/api/memory_test.go b/internal/api/memory_test.go index 397faf50c..a11aac8b0 100644 --- a/internal/api/memory_test.go +++ 
b/internal/api/memory_test.go @@ -1,78 +1,938 @@ package api import ( + "fmt" + "os" + "runtime" + "sync" "testing" + "time" "unsafe" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/CosmWasm/wasmvm/v2/internal/api/testdb" + "github.com/CosmWasm/wasmvm/v2/types" ) -func TestMakeView(t *testing.T) { - data := []byte{0xaa, 0xbb, 0x64} - dataView := makeView(data) - require.Equal(t, cbool(false), dataView.is_nil) - require.Equal(t, cusize(3), dataView.len) - - empty := []byte{} - emptyView := makeView(empty) - require.Equal(t, cbool(false), emptyView.is_nil) - require.Equal(t, cusize(0), emptyView.len) - - nilView := makeView(nil) - require.Equal(t, cbool(true), nilView.is_nil) -} - -func TestCreateAndDestroyUnmanagedVector(t *testing.T) { - // non-empty - { - original := []byte{0xaa, 0xbb, 0x64} - unmanaged := newUnmanagedVector(original) - require.Equal(t, cbool(false), unmanaged.is_none) - require.Equal(t, 3, int(unmanaged.len)) - require.GreaterOrEqual(t, 3, int(unmanaged.cap)) // Rust implementation decides this - copy := copyAndDestroyUnmanagedVector(unmanaged) - require.Equal(t, original, copy) - } - - // empty - { - original := []byte{} - unmanaged := newUnmanagedVector(original) - require.Equal(t, cbool(false), unmanaged.is_none) - require.Equal(t, 0, int(unmanaged.len)) - require.GreaterOrEqual(t, 0, int(unmanaged.cap)) // Rust implementation decides this - copy := copyAndDestroyUnmanagedVector(unmanaged) - require.Equal(t, original, copy) - } - - // none - { - var original []byte - unmanaged := newUnmanagedVector(original) - require.Equal(t, cbool(true), unmanaged.is_none) - // We must not make assumptions on the other fields in this case - copy := copyAndDestroyUnmanagedVector(unmanaged) - require.Nil(t, copy) - } -} - -// Like the test above but without `newUnmanagedVector` calls. -// Since only Rust can actually create them, we only test edge cases here. -// -//go:nocheckptr -func TestCopyDestroyUnmanagedVector(t *testing.T) { - { - // ptr, cap and len broken. 
Do not access those values when is_none is true - invalid_ptr := unsafe.Pointer(uintptr(42)) - uv := constructUnmanagedVector(cbool(true), cu8_ptr(invalid_ptr), cusize(0xBB), cusize(0xAA)) - copy := copyAndDestroyUnmanagedVector(uv) - require.Nil(t, copy) +//----------------------------------------------------------------------------- +// Existing Table-Driven Tests for Memory Bridging and Unmanaged Vectors +//----------------------------------------------------------------------------- + +func TestMakeView_TableDriven(t *testing.T) { + type testCase struct { + name string + input []byte + expIsNil bool + expLen cusize + } + + tests := []testCase{ + { + name: "Non-empty byte slice", + input: []byte{0xaa, 0xbb, 0x64}, + expIsNil: false, + expLen: 3, + }, + { + name: "Empty slice", + input: []byte{}, + expIsNil: false, + expLen: 0, + }, + { + name: "Nil slice", + input: nil, + expIsNil: true, + expLen: 0, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + view := makeView(tc.input) + require.Equal(t, cbool(tc.expIsNil), view.is_nil, "Mismatch in is_nil for test: %s", tc.name) + require.Equal(t, tc.expLen, view.len, "Mismatch in len for test: %s", tc.name) + }) + } +} + +func TestCreateAndDestroyUnmanagedVector_TableDriven(t *testing.T) { + // Helper for the round-trip test + checkUnmanagedRoundTrip := func(t *testing.T, input []byte, expectNone bool) { + t.Helper() + unmanaged := newUnmanagedVector(input) + require.Equal(t, cbool(expectNone), unmanaged.is_none, "Mismatch on is_none with input: %v", input) + + if !expectNone && len(input) > 0 { + require.Equal(t, len(input), int(unmanaged.len), "Length mismatch for input: %v", input) + require.GreaterOrEqual(t, int(unmanaged.cap), int(unmanaged.len), "Expected cap >= len for input: %v", input) + } + + copyData := copyAndDestroyUnmanagedVector(unmanaged) + require.Equal(t, input, copyData, "Round-trip mismatch for input: %v", input) } - { - // Capacity is 0, so no allocation happened. Do not access the pointer. 
- invalid_ptr := unsafe.Pointer(uintptr(42)) - uv := constructUnmanagedVector(cbool(false), cu8_ptr(invalid_ptr), cusize(0), cusize(0)) + + type testCase struct { + name string + input []byte + expectNone bool + } + + tests := []testCase{ + { + name: "Non-empty data", + input: []byte{0xaa, 0xbb, 0x64}, + expectNone: false, + }, + { + name: "Empty but non-nil", + input: []byte{}, + expectNone: false, + }, + { + name: "Nil => none", + input: nil, + expectNone: true, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + checkUnmanagedRoundTrip(t, tc.input, tc.expectNone) + }) + } +} + +func TestCopyDestroyUnmanagedVector_SpecificEdgeCases(t *testing.T) { + t.Run("is_none = true ignoring ptr/len/cap", func(t *testing.T) { + invalidPtr := unsafe.Pointer(uintptr(42)) + uv := constructUnmanagedVector(cbool(true), cu8_ptr(invalidPtr), cusize(0xBB), cusize(0xAA)) + copy := copyAndDestroyUnmanagedVector(uv) + require.Nil(t, copy, "copy should be nil if is_none=true") + }) + + t.Run("cap=0 => no allocation => empty data", func(t *testing.T) { + invalidPtr := unsafe.Pointer(uintptr(42)) + uv := constructUnmanagedVector(cbool(false), cu8_ptr(invalidPtr), cusize(0), cusize(0)) copy := copyAndDestroyUnmanagedVector(uv) - require.Equal(t, []byte{}, copy) + require.Equal(t, []byte{}, copy, "expected empty result if cap=0 and is_none=false") + }) +} + +func TestCopyDestroyUnmanagedVector_Concurrent(t *testing.T) { + inputs := [][]byte{ + {1, 2, 3}, + {}, + nil, + {0xff, 0x00, 0x12, 0xab, 0xcd, 0xef}, + } + + var wg sync.WaitGroup + concurrency := 10 + + for i := 0; i < concurrency; i++ { + for _, data := range inputs { + data := data + wg.Add(1) + go func() { + defer wg.Done() + uv := newUnmanagedVector(data) + out := copyAndDestroyUnmanagedVector(uv) + assert.Equal(t, data, out, "Mismatch in concurrency test for input=%v", data) + }() + } + } + wg.Wait() +} + +//----------------------------------------------------------------------------- +// Memory Leak Scenarios and Related Tests +//----------------------------------------------------------------------------- + +func TestMemoryLeakScenarios(t *testing.T) { + tests := []struct { + name string + run func(t *testing.T) + }{ + { + name: "Iterator_NoClose_WithGC", + run: func(t *testing.T) { + t.Helper() + db := testdb.NewMemDB() + defer db.Close() + + key := []byte("key1") + val := []byte("value1") + db.Set(key, val) + + iter, err := db.Iterator([]byte("key1"), []byte("zzzz")) + require.NoError(t, err) + require.NoError(t, iter.Error(), "creating iterator should not error") + // Simulate leak by not closing the iterator. 
+ iter = nil + + runtime.GC() + + writeDone := make(chan error, 1) + go func() { + db.Set([]byte("key2"), []byte("value2")) + writeDone <- nil + }() + + select { + case err := <-writeDone: + require.NoError(t, err, "DB write should succeed after GC") + case <-time.After(200 * time.Millisecond): + require.FailNow(t, "DB write timed out; iterator lock may not have been released") + } + }, + }, + { + name: "Iterator_ProperClose_NoLeak", + run: func(t *testing.T) { + t.Helper() + db := testdb.NewMemDB() + defer db.Close() + + db.Set([]byte("a"), []byte("value-a")) + db.Set([]byte("b"), []byte("value-b")) + + iter, err := db.Iterator([]byte("a"), []byte("z")) + require.NoError(t, err) + require.NoError(t, iter.Error(), "creating iterator") + for iter.Valid() { + _ = iter.Key() + _ = iter.Value() + iter.Next() + } + require.NoError(t, iter.Close(), "closing iterator should succeed") + + db.Set([]byte("c"), []byte("value-c")) + }, + }, + { + name: "Cache_Release_Frees_Memory", + run: func(t *testing.T) { + t.Helper() + // Ensure that releasing caches frees memory. + getAlloc := func() uint64 { + var m runtime.MemStats + runtime.ReadMemStats(&m) + return m.HeapAlloc + } + + runtime.GC() + baseAlloc := getAlloc() + + const N = 5 + caches := make([]Cache, 0, N) + + // Wait up to 5 seconds to acquire each cache instance. + for i := 0; i < N; i++ { + tmpdir := t.TempDir() + config := types.VMConfig{ + Cache: types.CacheOptions{ + BaseDir: tmpdir, + AvailableCapabilities: []string{}, + MemoryCacheSizeBytes: types.NewSizeMebi(0), + InstanceMemoryLimitBytes: types.NewSizeMebi(32), + }, + } + cache, err := InitCache(config) + require.NoError(t, err, "InitCache should eventually succeed") + caches = append(caches, cache) + } + + runtime.GC() + allocAfterCreate := getAlloc() + + for _, c := range caches { + ReleaseCache(c) + } + runtime.GC() + allocAfterRelease := getAlloc() + + require.Less(t, allocAfterRelease, baseAlloc*2, + "Heap allocation after releasing caches too high: base=%d, after=%d", baseAlloc, allocAfterRelease) + require.Less(t, (allocAfterRelease-baseAlloc)*2, (allocAfterCreate - baseAlloc), + "Releasing caches did not free expected memory: before=%d, after=%d", allocAfterCreate, allocAfterRelease) + }, + }, + { + name: "MemDB_Iterator_Range_Correctness", + run: func(t *testing.T) { + t.Helper() + db := testdb.NewMemDB() + defer db.Close() + + keys := [][]byte{[]byte("a"), []byte("b"), []byte("c")} + for _, k := range keys { + db.Set(k, []byte("val:"+string(k))) + } + + subCases := []struct { + start, end []byte + expKeys [][]byte + }{ + {nil, nil, [][]byte{[]byte("a"), []byte("b"), []byte("c")}}, + {[]byte("a"), []byte("c"), [][]byte{[]byte("a"), []byte("b")}}, + {[]byte("a"), []byte("b"), [][]byte{[]byte("a")}}, + {[]byte("b"), []byte("b"), [][]byte{}}, + {[]byte("b"), []byte("c"), [][]byte{[]byte("b")}}, + } + + for _, sub := range subCases { + iter, err := db.Iterator(sub.start, sub.end) + require.NoError(t, err) + require.NoError(t, iter.Error(), "Iterator(%q, %q) should not error", sub.start, sub.end) + var gotKeys [][]byte + for ; iter.Valid(); iter.Next() { + k := append([]byte{}, iter.Key()...) 
+ gotKeys = append(gotKeys, k) + } + require.NoError(t, iter.Close(), "closing iterator") + if len(sub.expKeys) == 0 { + require.Empty(t, gotKeys, "Iterator(%q, %q) expected no keys", sub.start, sub.end) + } else { + require.Equal(t, sub.expKeys, gotKeys, "Iterator(%q, %q) returned unexpected keys", sub.start, sub.end) + } + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, tc.run) + } +} + +//----------------------------------------------------------------------------- +// New Stress Tests +//----------------------------------------------------------------------------- + +// TestStressHighVolumeInsert inserts a large number of items and tracks peak memory. +func TestStressHighVolumeInsert(t *testing.T) { + if testing.Short() { + t.Skip("Skipping high-volume insert test in short mode") + } + db := testdb.NewMemDB() + defer db.Close() + + const totalInserts = 100000 + t.Logf("Inserting %d items...", totalInserts) + + var mStart, mEnd runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&mStart) + + for i := 0; i < totalInserts; i++ { + key := []byte(fmt.Sprintf("key_%d", i)) + db.Set(key, []byte("value")) + } + runtime.GC() + runtime.ReadMemStats(&mEnd) + t.Logf("Memory before: %d bytes, after: %d bytes", mStart.Alloc, mEnd.Alloc) + + require.LessOrEqual(t, mEnd.Alloc, mStart.Alloc+50*1024*1024, "Memory usage exceeded expected threshold after high-volume insert") +} + +// TestBulkDeletionMemoryRecovery verifies that deleting many entries frees memory. +func TestBulkDeletionMemoryRecovery(t *testing.T) { + if testing.Short() { + t.Skip("Skipping bulk deletion test in short mode") + } + db := testdb.NewMemDB() + defer db.Close() + + const totalInserts = 50000 + keys := make([][]byte, totalInserts) + for i := 0; i < totalInserts; i++ { + key := []byte(fmt.Sprintf("bulk_key_%d", i)) + keys[i] = key + db.Set(key, []byte("bulk_value")) + } + runtime.GC() + var mBefore runtime.MemStats + runtime.ReadMemStats(&mBefore) + + for _, key := range keys { + db.Delete(key) + } + runtime.GC() + var mAfter runtime.MemStats + runtime.ReadMemStats(&mAfter) + t.Logf("Memory before deletion: %d bytes, after deletion: %d bytes", mBefore.Alloc, mAfter.Alloc) + + require.Less(t, mAfter.Alloc, mBefore.Alloc, "Memory usage did not recover after bulk deletion") +} + +// TestPeakMemoryTracking tracks the peak memory usage during high-load operations. +func TestPeakMemoryTracking(t *testing.T) { + if testing.Short() { + t.Skip("Skipping peak memory tracking test in short mode") + } + db := testdb.NewMemDB() + defer db.Close() + + const totalOps = 100000 + var peakAlloc uint64 + var m runtime.MemStats + for i := 0; i < totalOps; i++ { + key := []byte(fmt.Sprintf("peak_key_%d", i)) + db.Set(key, []byte("peak_value")) + if i%1000 == 0 { + runtime.GC() + runtime.ReadMemStats(&m) + if m.Alloc > peakAlloc { + peakAlloc = m.Alloc + } + } + } + t.Logf("Peak memory allocation observed: %d bytes", peakAlloc) + require.LessOrEqual(t, peakAlloc, uint64(200*1024*1024), "Peak memory usage too high") +} + +//----------------------------------------------------------------------------- +// New Edge Case Tests for Memory Leaks +//----------------------------------------------------------------------------- + +// TestRepeatedCreateDestroyCycles repeatedly creates and destroys MemDB instances. 
+func TestRepeatedCreateDestroyCycles(t *testing.T) { + if testing.Short() { + t.Skip("Skipping repeated create/destroy cycles test in short mode") + } + const cycles = 100 + var mStart, mEnd runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&mStart) + for i := 0; i < cycles; i++ { + db := testdb.NewMemDB() + db.Set([]byte("cycle_key"), []byte("cycle_value")) + db.Close() + } + runtime.GC() + runtime.ReadMemStats(&mEnd) + t.Logf("Memory before cycles: %d bytes, after cycles: %d bytes", mStart.Alloc, mEnd.Alloc) + require.LessOrEqual(t, mEnd.Alloc, mStart.Alloc+10*1024*1024, "Memory leak detected over create/destroy cycles") +} + +// TestSmallAllocationsLeak repeatedly allocates small objects to detect leaks. +func TestSmallAllocationsLeak(t *testing.T) { + if testing.Short() { + t.Skip("Skipping small allocations leak test in short mode") + } + const iterations = 100000 + for i := 0; i < iterations; i++ { + _ = make([]byte, 32) + } + runtime.GC() + var m runtime.MemStats + runtime.ReadMemStats(&m) + t.Logf("Memory after small allocations GC: %d bytes", m.Alloc) + require.Less(t, m.Alloc, uint64(50*1024*1024), "Memory leak detected in small allocations") +} + +//----------------------------------------------------------------------------- +// New Concurrency Tests +//----------------------------------------------------------------------------- + +// TestConcurrentAccess performs parallel read/write operations on the MemDB. +func TestConcurrentAccess(t *testing.T) { + if testing.Short() { + t.Skip("Skipping concurrent access test in short mode") + } + db := testdb.NewMemDB() + defer db.Close() + + const numWriters = 10 + const numReaders = 10 + const opsPerGoroutine = 1000 + var wg sync.WaitGroup + + // Writers. + for i := 0; i < numWriters; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + for j := 0; j < opsPerGoroutine; j++ { + key := []byte(fmt.Sprintf("concurrent_key_%d_%d", id, j)) + db.Set(key, []byte("concurrent_value")) + } + }(i) + } + + // Readers. + for i := 0; i < numReaders; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < opsPerGoroutine; j++ { + iter, err := db.Iterator(nil, nil) + require.NoError(t, err) + for iter.Valid() { + _ = iter.Key() + iter.Next() + } + iter.Close() + } + }() + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + case <-time.After(30 * time.Second): + t.Fatal("Concurrent access test timed out; potential deadlock or race condition") + } +} + +// TestLockingAndRelease simulates read-write conflicts to ensure proper lock handling. 
+func TestLockingAndRelease(t *testing.T) { + if testing.Short() { + t.Skip("Skipping locking and release test in short mode") } + db := testdb.NewMemDB() + defer db.Close() + + db.Set([]byte("conflict_key"), []byte("initial")) + + ready := make(chan struct{}) + release := make(chan struct{}) + go func() { + iter, err := db.Iterator([]byte("conflict_key"), []byte("zzzz")) + require.NoError(t, err) + assert.NoError(t, iter.Error(), "Iterator creation error") + close(ready) // signal iterator is active + <-release // hold the iterator a bit + iter.Close() + }() + + <-ready + done := make(chan struct{}) + go func() { + db.Set([]byte("conflict_key"), []byte("updated")) + close(done) + }() + + time.Sleep(200 * time.Millisecond) + close(release) + + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatal("Exclusive lock not acquired after read lock release; potential deadlock") + } +} + +//----------------------------------------------------------------------------- +// New Sustained Memory Usage Tests +//----------------------------------------------------------------------------- + +// TestLongRunningWorkload simulates a long-running workload and verifies memory stability. +func TestLongRunningWorkload(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long-running workload test in short mode") + } + db := testdb.NewMemDB() + defer db.Close() + + const iterations = 10000 + const reportInterval = 1000 + var mInitial runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&mInitial) + + for i := 0; i < iterations; i++ { + key := []byte(fmt.Sprintf("workload_key_%d", i)) + db.Set(key, []byte("workload_value")) + if i%2 == 0 { + db.Delete(key) + } + if i%reportInterval == 0 { + runtime.GC() + var m runtime.MemStats + runtime.ReadMemStats(&m) + t.Logf("Iteration %d: HeapAlloc=%d bytes", i, m.HeapAlloc) + } + } + runtime.GC() + var mFinal runtime.MemStats + runtime.ReadMemStats(&mFinal) + t.Logf("Initial HeapAlloc=%d bytes, Final HeapAlloc=%d bytes", mInitial.HeapAlloc, mFinal.HeapAlloc) + + require.LessOrEqual(t, mFinal.HeapAlloc, mInitial.HeapAlloc+20*1024*1024, "Memory usage increased over long workload") +} + +//----------------------------------------------------------------------------- +// Additional Utility Test for Memory Metrics +//----------------------------------------------------------------------------- + +// TestMemoryMetrics verifies that allocation and free counters remain reasonably balanced. +func TestMemoryMetrics(t *testing.T) { + if testing.Short() { + t.Skip("Skipping memory metrics test in short mode") + } + var mBefore, mAfter runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&mBefore) + + const allocCount = 10000 + for i := 0; i < allocCount; i++ { + _ = make([]byte, 128) + } + runtime.GC() + runtime.ReadMemStats(&mAfter) + t.Logf("Mallocs: before=%d, after=%d, diff=%d", mBefore.Mallocs, mAfter.Mallocs, mAfter.Mallocs-mBefore.Mallocs) + t.Logf("Frees: before=%d, after=%d, diff=%d", mBefore.Frees, mAfter.Frees, mAfter.Frees-mBefore.Frees) + + // Use original acceptable threshold. + diff := mAfter.Mallocs - mAfter.Frees + require.LessOrEqual(t, diff, uint64(allocCount/10), "Unexpected allocation leak detected") +} + +// ----------------------------------------------------------------------------- +// Additional New Test Ideas +// +// TestRandomMemoryAccessPatterns simulates random insertions and deletions, +// which can reveal subtle memory fragmentation or concurrent issues. 
+func TestRandomMemoryAccessPatterns(t *testing.T) { + if testing.Short() { + t.Skip("Skipping random memory access patterns test in short mode") + } + db := testdb.NewMemDB() + defer db.Close() + + const ops = 50000 + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func(seed int) { + defer wg.Done() + for j := 0; j < ops; j++ { + if j%2 == 0 { + key := []byte(fmt.Sprintf("rand_key_%d_%d", seed, j)) + db.Set(key, []byte("rand_value")) + } else { + // Randomly delete some keys. + key := []byte(fmt.Sprintf("rand_key_%d_%d", seed, j-1)) + db.Delete(key) + } + } + }(i) + } + wg.Wait() + // After random operations, check that GC recovers memory. + runtime.GC() + var m runtime.MemStats + runtime.ReadMemStats(&m) + t.Logf("After random memory access, HeapAlloc=%d bytes", m.HeapAlloc) +} + +// TestMemoryFragmentation attempts to force fragmentation by alternating large and small allocations. +func TestMemoryFragmentation(t *testing.T) { + if testing.Short() { + t.Skip("Skipping memory fragmentation test in short mode") + } + const iterations = 10000 + for i := 0; i < iterations; i++ { + if i%10 == 0 { + // Allocate a larger block (e.g. 64KB) + _ = make([]byte, 64*1024) + } else { + _ = make([]byte, 256) + } + } + runtime.GC() + var m runtime.MemStats + runtime.ReadMemStats(&m) + t.Logf("After fragmentation test, HeapAlloc=%d bytes", m.HeapAlloc) + // We expect that HeapAlloc should eventually come down. + require.Less(t, m.HeapAlloc, uint64(100*1024*1024), "Memory fragmentation causing high HeapAlloc") +} + +// getMemoryStats returns current heap allocation and allocation counters +func getMemoryStats() (heapAlloc, mallocs, frees uint64) { + var m runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&m) + return m.HeapAlloc, m.Mallocs, m.Frees +} + +// TestWasmVMMemoryLeakStress tests memory stability under repeated contract operations +func TestWasmVMMemoryLeakStress(t *testing.T) { + if testing.Short() { + t.Skip("Skipping WASM VM stress test in short mode") + } + + baseAlloc, baseMallocs, baseFrees := getMemoryStats() + t.Logf("Baseline: Heap=%d bytes, Mallocs=%d, Frees=%d", baseAlloc, baseMallocs, baseFrees) + + const iterations = 5000 + wasmCode, err := os.ReadFile("../../testdata/hackatom.wasm") + require.NoError(t, err) + + for i := 0; i < iterations; i++ { + tempDir := t.TempDir() + config := types.VMConfig{ + Cache: types.CacheOptions{ + BaseDir: tempDir, + AvailableCapabilities: []string{"iterator", "staking"}, + MemoryCacheSizeBytes: types.NewSizeMebi(64), + InstanceMemoryLimitBytes: types.NewSizeMebi(32), + }, + } + cache, err := InitCache(config) + require.NoError(t, err, "Cache init failed at iteration %d", i) + + checksum, err := StoreCode(cache, wasmCode, true) + require.NoError(t, err) + + db := testdb.NewMemDB() + gasMeter := NewMockGasMeter(100000000) + env := MockEnvBin(t) + info := MockInfoBin(t, "creator") + msg := []byte(`{"verifier": "test", "beneficiary": "test"}`) + + var igasMeter types.GasMeter = gasMeter + store := NewLookup(gasMeter) + api := NewMockAPI() + querier := DefaultQuerier(MOCK_CONTRACT_ADDR, nil) + + // Perform instantiate (potential leak point) + _, _, err = Instantiate(cache, checksum, env, info, msg, &igasMeter, store, api, &querier, 100000000, false) + require.NoError(t, err) + + // Sometimes skip cleanup to test leak handling + if i%10 != 0 { + ReleaseCache(cache) + } + db.Close() + + if i%100 == 0 { + alloc, mallocs, frees := getMemoryStats() + t.Logf("Iter %d: Heap=%d bytes (+%d), Mallocs=%d, Frees=%d", + i, alloc, 
alloc-baseAlloc, mallocs-baseMallocs, frees-baseFrees) + require.Less(t, alloc, baseAlloc*2, "Memory doubled at iteration %d", i) + } + } + + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Final: Heap=%d bytes (+%d), Net allocations=%d", + finalAlloc, finalAlloc-baseAlloc, (finalMallocs-finalFrees)-(baseMallocs-baseFrees)) + require.Less(t, finalAlloc, baseAlloc+20*1024*1024, "Significant memory leak detected") +} + +// TestConcurrentWasmOperations tests memory under concurrent contract operations +func TestConcurrentWasmOperations(t *testing.T) { + if testing.Short() { + t.Skip("Skipping concurrent WASM test in short mode") + } + + tempDir := t.TempDir() + + config := types.VMConfig{ + Cache: types.CacheOptions{ + BaseDir: tempDir, + AvailableCapabilities: []string{}, + MemoryCacheSizeBytes: types.NewSizeMebi(128), + InstanceMemoryLimitBytes: types.NewSizeMebi(32), + }, + } + + cache, err := InitCache(config) + require.NoError(t, err) + defer ReleaseCache(cache) + + wasmCode, err := os.ReadFile("../../testdata/hackatom.wasm") + require.NoError(t, err) + checksum, err := StoreCode(cache, wasmCode, true) + require.NoError(t, err) + + const goroutines = 20 + const operations = 1000 + var wg sync.WaitGroup + + baseAlloc, _, _ := getMemoryStats() + env := MockEnvBin(t) + api := NewMockAPI() + querier := DefaultQuerier(MOCK_CONTRACT_ADDR, nil) + + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func(gid int) { + defer wg.Done() + db := testdb.NewMemDB() + defer db.Close() + + for j := 0; j < operations; j++ { + gasMeter := NewMockGasMeter(100000000) + var igasMeter types.GasMeter = gasMeter + store := NewLookup(gasMeter) + info := MockInfoBin(t, fmt.Sprintf("sender%d", gid)) + + msg := []byte(fmt.Sprintf(`{"verifier": "test%d", "beneficiary": "test%d"}`, j, j)) + _, _, err := Instantiate(cache, checksum, env, info, msg, &igasMeter, store, api, &querier, 100000000, false) + assert.NoError(t, err) + } + }(i) + } + + wg.Wait() + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Concurrent test: Initial=%d bytes, Final=%d bytes, Net allocs=%d", + baseAlloc, finalAlloc, finalMallocs-finalFrees) + require.Less(t, finalAlloc, baseAlloc+30*1024*1024, "Concurrent operations leaked memory") +} + +// TestWasmIteratorMemoryLeaks tests iterator-specific memory handling +func TestWasmIteratorMemoryLeaks(t *testing.T) { + if testing.Short() { + t.Skip("Skipping iterator leak test in short mode") + } + + tempDir := t.TempDir() + + config := types.VMConfig{ + Cache: types.CacheOptions{ + BaseDir: tempDir, + AvailableCapabilities: []string{"iterator"}, + MemoryCacheSizeBytes: types.NewSizeMebi(64), + InstanceMemoryLimitBytes: types.NewSizeMebi(32), + }, + } + + cache, err := InitCache(config) + require.NoError(t, err) + defer ReleaseCache(cache) + + wasmCode, err := os.ReadFile("../../testdata/queue.wasm") + require.NoError(t, err) + checksum, err := StoreCode(cache, wasmCode, true) + require.NoError(t, err) + + db := testdb.NewMemDB() + defer db.Close() + + // Populate DB with data + for i := 0; i < 1000; i++ { + db.Set([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("val%d", i))) + } + + gasMeter := NewMockGasMeter(100000000) + var igasMeter types.GasMeter = gasMeter + store := NewLookup(gasMeter) + api := NewMockAPI() + querier := DefaultQuerier(MOCK_CONTRACT_ADDR, nil) + env := MockEnvBin(t) + info := MockInfoBin(t, "creator") + + _, _, err = Instantiate(cache, checksum, env, info, []byte(`{}`), &igasMeter, store, api, &querier, 100000000, false) + 
require.NoError(t, err) + + baseAlloc, _, _ := getMemoryStats() + const iterations = 1000 + + for i := 0; i < iterations; i++ { + gasMeter = NewMockGasMeter(100000000) + igasMeter = gasMeter + store.SetGasMeter(gasMeter) + + // Query that creates iterators (potential leak point) + _, _, err := Query(cache, checksum, env, []byte(`{"open_iterators":{"count":5}}`), + &igasMeter, store, api, &querier, 100000000, false) + require.NoError(t, err) + + if i%100 == 0 { + alloc, _, _ := getMemoryStats() + t.Logf("Iter %d: Heap=%d bytes (+%d)", i, alloc, alloc-baseAlloc) + } + } + + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Iterator test: Initial=%d bytes, Final=%d bytes, Net allocs=%d", + baseAlloc, finalAlloc, finalMallocs-finalFrees) + require.Less(t, finalAlloc, baseAlloc+10*1024*1024, "Iterator operations leaked memory") +} + +// TestWasmLongRunningMemoryStability tests memory over extended operation sequences +func TestWasmLongRunningMemoryStability(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long-running WASM test in short mode") + } + + tempDir := t.TempDir() + + config := types.VMConfig{ + Cache: types.CacheOptions{ + BaseDir: tempDir, + AvailableCapabilities: []string{}, + MemoryCacheSizeBytes: types.NewSizeMebi(128), + InstanceMemoryLimitBytes: types.NewSizeMebi(32), + }, + } + + cache, err := InitCache(config) + require.NoError(t, err) + defer ReleaseCache(cache) + + wasmCode, err := os.ReadFile("../../testdata/hackatom.wasm") + require.NoError(t, err) + checksum, err := StoreCode(cache, wasmCode, true) + require.NoError(t, err) + + db := testdb.NewMemDB() + defer db.Close() + + baseAlloc, baseMallocs, baseFrees := getMemoryStats() + const iterations = 10000 + + api := NewMockAPI() + querier := DefaultQuerier(MOCK_CONTRACT_ADDR, nil) + env := MockEnvBin(t) + info := MockInfoBin(t, "creator") + + for i := 0; i < iterations; i++ { + gasMeter := NewMockGasMeter(100000000) + var igasMeter types.GasMeter = gasMeter + store := NewLookup(gasMeter) + + // Mix operations + switch i % 3 { + case 0: + _, _, err = Instantiate(cache, checksum, env, info, + []byte(fmt.Sprintf(`{"verifier": "test%d", "beneficiary": "test"}`, i)), + &igasMeter, store, api, &querier, 100000000, false) + require.NoError(t, err) + case 1: + _, _, err = Query(cache, checksum, env, []byte(`{"verifier":{}}`), + &igasMeter, store, api, &querier, 100000000, false) + require.NoError(t, err) + case 2: + db.Set([]byte(fmt.Sprintf("key%d", i)), []byte("value")) + _, _, err = Execute(cache, checksum, env, info, []byte(`{"release":{}}`), + &igasMeter, store, api, &querier, 100000000, false) + require.NoError(t, err) + } + + if i%1000 == 0 { + alloc, mallocs, frees := getMemoryStats() + t.Logf("Iter %d: Heap=%d bytes (+%d), Net allocs=%d", + i, alloc, alloc-baseAlloc, (mallocs-frees)-(baseMallocs-baseFrees)) + require.Less(t, alloc, baseAlloc*2, "Memory growth too high at iteration %d", i) + } + } + + finalAlloc, finalMallocs, finalFrees := getMemoryStats() + t.Logf("Final: Heap=%d bytes (+%d), Net allocs=%d", + finalAlloc, finalAlloc-baseAlloc, (finalMallocs-finalFrees)-(baseMallocs-baseFrees)) + require.LessOrEqual(t, finalAlloc, baseAlloc+25*1024*1024, "Long-running WASM leaked memory") } From b1da814367b3aedff7596298cd8ce2c36d9113c0 Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Thu, 27 Mar 2025 12:58:36 +0100 Subject: [PATCH 7/9] Add sleep after runtime.GC() to allow GC to complete --- internal/api/memory_test.go | 58 ++++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 
17 deletions(-) diff --git a/internal/api/memory_test.go b/internal/api/memory_test.go index a11aac8b0..c546aa0bc 100644 --- a/internal/api/memory_test.go +++ b/internal/api/memory_test.go @@ -167,7 +167,8 @@ func TestMemoryLeakScenarios(t *testing.T) { key := []byte("key1") val := []byte("value1") - db.Set(key, val) + err := db.Set(key, val) + require.NoError(t, err) iter, err := db.Iterator([]byte("key1"), []byte("zzzz")) require.NoError(t, err) @@ -179,7 +180,8 @@ func TestMemoryLeakScenarios(t *testing.T) { writeDone := make(chan error, 1) go func() { - db.Set([]byte("key2"), []byte("value2")) + err := db.Set([]byte("key2"), []byte("value2")) + require.NoError(t, err) writeDone <- nil }() @@ -198,8 +200,10 @@ func TestMemoryLeakScenarios(t *testing.T) { db := testdb.NewMemDB() defer db.Close() - db.Set([]byte("a"), []byte("value-a")) - db.Set([]byte("b"), []byte("value-b")) + err := db.Set([]byte("a"), []byte("value-a")) + require.NoError(t, err) + err = db.Set([]byte("b"), []byte("value-b")) + require.NoError(t, err) iter, err := db.Iterator([]byte("a"), []byte("z")) require.NoError(t, err) @@ -211,7 +215,8 @@ func TestMemoryLeakScenarios(t *testing.T) { } require.NoError(t, iter.Close(), "closing iterator should succeed") - db.Set([]byte("c"), []byte("value-c")) + err = db.Set([]byte("c"), []byte("value-c")) + require.NoError(t, err) }, }, { @@ -254,6 +259,9 @@ func TestMemoryLeakScenarios(t *testing.T) { ReleaseCache(c) } runtime.GC() + // Wait to allow GC to complete. + time.Sleep(5 * time.Second) + allocAfterRelease := getAlloc() require.Less(t, allocAfterRelease, baseAlloc*2, @@ -271,7 +279,8 @@ func TestMemoryLeakScenarios(t *testing.T) { keys := [][]byte{[]byte("a"), []byte("b"), []byte("c")} for _, k := range keys { - db.Set(k, []byte("val:"+string(k))) + err := db.Set(k, []byte("val:"+string(k))) + require.NoError(t, err) } subCases := []struct { @@ -331,7 +340,8 @@ func TestStressHighVolumeInsert(t *testing.T) { for i := 0; i < totalInserts; i++ { key := []byte(fmt.Sprintf("key_%d", i)) - db.Set(key, []byte("value")) + err := db.Set(key, []byte("value")) + require.NoError(t, err) } runtime.GC() runtime.ReadMemStats(&mEnd) @@ -353,7 +363,8 @@ func TestBulkDeletionMemoryRecovery(t *testing.T) { for i := 0; i < totalInserts; i++ { key := []byte(fmt.Sprintf("bulk_key_%d", i)) keys[i] = key - db.Set(key, []byte("bulk_value")) + err := db.Set(key, []byte("bulk_value")) + require.NoError(t, err) } runtime.GC() var mBefore runtime.MemStats @@ -383,7 +394,8 @@ func TestPeakMemoryTracking(t *testing.T) { var m runtime.MemStats for i := 0; i < totalOps; i++ { key := []byte(fmt.Sprintf("peak_key_%d", i)) - db.Set(key, []byte("peak_value")) + err := db.Set(key, []byte("peak_value")) + require.NoError(t, err) if i%1000 == 0 { runtime.GC() runtime.ReadMemStats(&m) @@ -411,7 +423,8 @@ func TestRepeatedCreateDestroyCycles(t *testing.T) { runtime.ReadMemStats(&mStart) for i := 0; i < cycles; i++ { db := testdb.NewMemDB() - db.Set([]byte("cycle_key"), []byte("cycle_value")) + err := db.Set([]byte("cycle_key"), []byte("cycle_value")) + require.NoError(t, err) db.Close() } runtime.GC() @@ -460,7 +473,8 @@ func TestConcurrentAccess(t *testing.T) { defer wg.Done() for j := 0; j < opsPerGoroutine; j++ { key := []byte(fmt.Sprintf("concurrent_key_%d_%d", id, j)) - db.Set(key, []byte("concurrent_value")) + err := db.Set(key, []byte("concurrent_value")) + require.NoError(t, err) } }(i) } @@ -503,7 +517,8 @@ func TestLockingAndRelease(t *testing.T) { db := testdb.NewMemDB() defer db.Close() - 
db.Set([]byte("conflict_key"), []byte("initial")) + err := db.Set([]byte("conflict_key"), []byte("initial")) + require.NoError(t, err) ready := make(chan struct{}) release := make(chan struct{}) @@ -519,7 +534,8 @@ func TestLockingAndRelease(t *testing.T) { <-ready done := make(chan struct{}) go func() { - db.Set([]byte("conflict_key"), []byte("updated")) + err := db.Set([]byte("conflict_key"), []byte("updated")) + require.NoError(t, err) close(done) }() @@ -553,7 +569,8 @@ func TestLongRunningWorkload(t *testing.T) { for i := 0; i < iterations; i++ { key := []byte(fmt.Sprintf("workload_key_%d", i)) - db.Set(key, []byte("workload_value")) + err := db.Set(key, []byte("workload_value")) + require.NoError(t, err) if i%2 == 0 { db.Delete(key) } @@ -590,6 +607,10 @@ func TestMemoryMetrics(t *testing.T) { _ = make([]byte, 128) } runtime.GC() + + // Wait a moment to allow GC to complete. + time.Sleep(5 * time.Second) + runtime.ReadMemStats(&mAfter) t.Logf("Mallocs: before=%d, after=%d, diff=%d", mBefore.Mallocs, mAfter.Mallocs, mAfter.Mallocs-mBefore.Mallocs) t.Logf("Frees: before=%d, after=%d, diff=%d", mBefore.Frees, mAfter.Frees, mAfter.Frees-mBefore.Frees) @@ -620,7 +641,8 @@ func TestRandomMemoryAccessPatterns(t *testing.T) { for j := 0; j < ops; j++ { if j%2 == 0 { key := []byte(fmt.Sprintf("rand_key_%d_%d", seed, j)) - db.Set(key, []byte("rand_value")) + err := db.Set(key, []byte("rand_value")) + require.NoError(t, err) } else { // Randomly delete some keys. key := []byte(fmt.Sprintf("rand_key_%d_%d", seed, j-1)) @@ -824,7 +846,8 @@ func TestWasmIteratorMemoryLeaks(t *testing.T) { // Populate DB with data for i := 0; i < 1000; i++ { - db.Set([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("val%d", i))) + err := db.Set([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("val%d", i))) + require.NoError(t, err) } gasMeter := NewMockGasMeter(100000000) @@ -917,7 +940,8 @@ func TestWasmLongRunningMemoryStability(t *testing.T) { &igasMeter, store, api, &querier, 100000000, false) require.NoError(t, err) case 2: - db.Set([]byte(fmt.Sprintf("key%d", i)), []byte("value")) + err := db.Set([]byte(fmt.Sprintf("key%d", i)), []byte("value")) + require.NoError(t, err) _, _, err = Execute(cache, checksum, env, info, []byte(`{"release":{}}`), &igasMeter, store, api, &querier, 100000000, false) require.NoError(t, err) From ba7adb6912d55778a7c55419bc36dfb6ed552014 Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Thu, 27 Mar 2025 14:22:00 +0100 Subject: [PATCH 8/9] Fix tests --- internal/api/memory_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/api/memory_test.go b/internal/api/memory_test.go index c546aa0bc..00ca12aaf 100644 --- a/internal/api/memory_test.go +++ b/internal/api/memory_test.go @@ -224,10 +224,10 @@ func TestMemoryLeakScenarios(t *testing.T) { run: func(t *testing.T) { t.Helper() // Ensure that releasing caches frees memory. - getAlloc := func() uint64 { + getAlloc := func() int64 { var m runtime.MemStats runtime.ReadMemStats(&m) - return m.HeapAlloc + return int64(m.HeapAlloc) } runtime.GC() @@ -260,7 +260,7 @@ func TestMemoryLeakScenarios(t *testing.T) { } runtime.GC() // Wait to allow GC to complete. - time.Sleep(5 * time.Second) + time.Sleep(1 * time.Second) allocAfterRelease := getAlloc() @@ -609,15 +609,15 @@ func TestMemoryMetrics(t *testing.T) { runtime.GC() // Wait a moment to allow GC to complete. 
- time.Sleep(5 * time.Second) + time.Sleep(1 * time.Second) runtime.ReadMemStats(&mAfter) t.Logf("Mallocs: before=%d, after=%d, diff=%d", mBefore.Mallocs, mAfter.Mallocs, mAfter.Mallocs-mBefore.Mallocs) t.Logf("Frees: before=%d, after=%d, diff=%d", mBefore.Frees, mAfter.Frees, mAfter.Frees-mBefore.Frees) // Use original acceptable threshold. - diff := mAfter.Mallocs - mAfter.Frees - require.LessOrEqual(t, diff, uint64(allocCount/10), "Unexpected allocation leak detected") + diff := int64(mAfter.Mallocs-mBefore.Mallocs) - int64(mAfter.Frees-mBefore.Frees) + require.LessOrEqual(t, diff, int64(allocCount/10), "Unexpected allocation leak detected") } // ----------------------------------------------------------------------------- From af326ba2f0a7d24f23f1bc830fdc546dc256e4ba Mon Sep 17 00:00:00 2001 From: Pino' Surace Date: Thu, 27 Mar 2025 14:31:22 +0100 Subject: [PATCH 9/9] Fix lint errors --- internal/api/memory_test.go | 21 ++++++++++++--------- internal/api/mocks.go | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/internal/api/memory_test.go b/internal/api/memory_test.go index 00ca12aaf..39e5cff54 100644 --- a/internal/api/memory_test.go +++ b/internal/api/memory_test.go @@ -181,7 +181,7 @@ func TestMemoryLeakScenarios(t *testing.T) { writeDone := make(chan error, 1) go func() { err := db.Set([]byte("key2"), []byte("value2")) - require.NoError(t, err) + assert.NoError(t, err) writeDone <- nil }() @@ -371,7 +371,8 @@ func TestBulkDeletionMemoryRecovery(t *testing.T) { runtime.ReadMemStats(&mBefore) for _, key := range keys { - db.Delete(key) + err := db.Delete(key) + require.NoError(t, err) } runtime.GC() var mAfter runtime.MemStats @@ -474,7 +475,7 @@ func TestConcurrentAccess(t *testing.T) { for j := 0; j < opsPerGoroutine; j++ { key := []byte(fmt.Sprintf("concurrent_key_%d_%d", id, j)) err := db.Set(key, []byte("concurrent_value")) - require.NoError(t, err) + assert.NoError(t, err) } }(i) } @@ -486,7 +487,7 @@ func TestConcurrentAccess(t *testing.T) { defer wg.Done() for j := 0; j < opsPerGoroutine; j++ { iter, err := db.Iterator(nil, nil) - require.NoError(t, err) + assert.NoError(t, err) for iter.Valid() { _ = iter.Key() iter.Next() @@ -524,7 +525,7 @@ func TestLockingAndRelease(t *testing.T) { release := make(chan struct{}) go func() { iter, err := db.Iterator([]byte("conflict_key"), []byte("zzzz")) - require.NoError(t, err) + assert.NoError(t, err) assert.NoError(t, iter.Error(), "Iterator creation error") close(ready) // signal iterator is active <-release // hold the iterator a bit @@ -535,7 +536,7 @@ func TestLockingAndRelease(t *testing.T) { done := make(chan struct{}) go func() { err := db.Set([]byte("conflict_key"), []byte("updated")) - require.NoError(t, err) + assert.NoError(t, err) close(done) }() @@ -572,7 +573,8 @@ func TestLongRunningWorkload(t *testing.T) { err := db.Set(key, []byte("workload_value")) require.NoError(t, err) if i%2 == 0 { - db.Delete(key) + err = db.Delete(key) + require.NoError(t, err) } if i%reportInterval == 0 { runtime.GC() @@ -642,11 +644,12 @@ func TestRandomMemoryAccessPatterns(t *testing.T) { if j%2 == 0 { key := []byte(fmt.Sprintf("rand_key_%d_%d", seed, j)) err := db.Set(key, []byte("rand_value")) - require.NoError(t, err) + assert.NoError(t, err) } else { // Randomly delete some keys. 
key := []byte(fmt.Sprintf("rand_key_%d_%d", seed, j-1)) - db.Delete(key) + err := db.Delete(key) + assert.NoError(t, err) } } }(i) diff --git a/internal/api/mocks.go b/internal/api/mocks.go index 7f8547308..d6ceb30c9 100644 --- a/internal/api/mocks.go +++ b/internal/api/mocks.go @@ -59,7 +59,7 @@ func MockInfoWithFunds(sender types.HumanAddress) types.MessageInfo { func MockInfoBin(tb testing.TB, sender types.HumanAddress) []byte { tb.Helper() bin, err := json.Marshal(MockInfoWithFunds(sender)) - require.NoError(tb, err) + assert.NoError(tb, err) return bin }
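A short sketch to make the assert/require distinction used throughout this patch concrete (illustrative only, not part of the patch; the test name and doWork helper are hypothetical). testify's require.NoError calls t.FailNow, which Go's testing package only permits on the goroutine running the test, while assert.NoError merely records the failure and returns; that is presumably why the checks inside go func bodies and in the shared MockInfoBin helper are switched to assert here.

package api

import (
	"errors"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestAssertVsRequireSketch is a hypothetical example, not part of this patch series.
func TestAssertVsRequireSketch(t *testing.T) {
	doWork := func() error { return errors.New("boom") } // hypothetical operation that returns an error

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// assert is safe off the test goroutine: on failure it records the
		// error and returns, so wg.Done still runs and the test is not aborted mid-flight.
		assert.Error(t, doWork())
	}()
	wg.Wait()

	// require is only safe on the test goroutine itself: on failure it calls
	// t.FailNow and stops the test immediately.
	require.Error(t, doWork())
}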