Commit
update listener_v2_log_listener_test
reductionista committed May 15, 2024
1 parent 82787a9 commit 5bd0199
Showing 1 changed file with 48 additions and 45 deletions.
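This commit migrates the test harness from the legacy backends.SimulatedBackend to go-ethereum's ethclient/simulated package. As orientation for the diff below, here is a minimal, self-contained sketch of the new API as the commit uses it; the funded address, balance, and gas limit are illustrative values, not taken from the repository:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	owner := common.HexToAddress("0x0000000000000000000000000000000000000001")

	// simulated.NewBackend replaces backends.NewSimulatedBackend: the genesis
	// allocation now comes from core/types, and functional options replace the
	// positional gas-limit argument.
	backend := simulated.NewBackend(types.GenesisAlloc{
		owner: {Balance: new(big.Int).Mul(big.NewInt(10), big.NewInt(1e18))},
	}, simulated.WithBlockGasLimit(10e6))
	defer backend.Close()

	// RPC-style calls (HeaderByNumber, contract deployment, ...) go through the
	// client; block production and clock control stay on the backend itself.
	ec := backend.Client()
	_ = ec

	blockHash := backend.Commit() // mine an empty block
	fmt.Println("committed block:", blockHash)
}

That split drives most of the changes below: the harness keeps the *simulated.Backend for Commit and AdjustTime, while backend.Client() supplies the client handed to the contract deployments in the setup.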
core/services/vrf/v2/listener_v2_log_listener_test.go: 93 changes (48 additions, 45 deletions)
@@ -13,6 +13,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient/simulated"
"github.com/jmoiron/sqlx"
"github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
@@ -45,7 +47,7 @@ type vrfLogPollerListenerTH struct {
ChainID *big.Int
ORM logpoller.ORM
LogPoller logpoller.LogPollerTest
- Client *backends.SimulatedBackend
+ Backend *simulated.Backend
Emitter *log_emitter.LogEmitter
EmitterAddress common.Address
VRFLogEmitter *vrf_log_emitter.VRFLogEmitter
@@ -69,23 +71,24 @@ func setupVRFLogPollerListenerTH(t *testing.T,

o := logpoller.NewORM(chainID, db, lggr)
owner := testutils.MustNewSimTransactor(t)
- ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{
+ backend := simulated.NewBackend(ethtypes.GenesisAlloc{
owner.From: {
Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)),
},
- }, 10e6)
+ }, simulated.WithBlockGasLimit(10e6))
+ ec := backend.Client()
// VRF Listener relies on block timestamps, but SimulatedBackend's clock starts at 1970-01-01 by default.
// This trick moves the clock closer to the current time: we set the first block to be X hours ago.
// FirstBlockAge is used to compute the first block's timestamp in SimulatedBackend (time.Now() - FirstBlockAge)
const FirstBlockAge = 24 * time.Hour
h, err := ec.HeaderByNumber(testutils.Context(t), nil)
require.NoError(t, err)
blockTime := time.UnixMilli(int64(h.Time))
- err = ec.AdjustTime(time.Since(blockTime) - FirstBlockAge)
+ err = backend.AdjustTime(time.Since(blockTime) - FirstBlockAge)
require.NoError(t, err)
- ec.Commit()
+ backend.Commit()

- esc := client.NewSimulatedBackendClient(t, ec, chainID)
+ esc := client.NewSimulatedBackendClient(t, backend, chainID)
// Mark genesis block as finalized to avoid any nulls in the tests
//TODO must we do this?
//h, err := ec.HeaderByNumber(ctx, nil)
@@ -109,7 +112,7 @@ func setupVRFLogPollerListenerTH(t *testing.T,
require.NoError(t, err)
vrfLogEmitterAddress, _, vrfLogEmitter, err := vrf_log_emitter.DeployVRFLogEmitter(owner, ec)
require.NoError(t, err)
- ec.Commit()
+ backend.Commit()

// Log Poller Listener
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr)
@@ -168,7 +171,7 @@ func setupVRFLogPollerListenerTH(t *testing.T,
EmitterAddress: emitterAddress1,
VRFLogEmitter: vrfLogEmitter,
VRFEmitterAddress: vrfLogEmitterAddress,
- Client: ec,
+ Backend: backend,
Owner: owner,
Db: db,
Listener: listener,
@@ -197,7 +200,7 @@ func TestInitProcessedBlock_NoVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Emit some logs from block 5 to 9 (Inclusive)
@@ -207,7 +210,7 @@ func TestInitProcessedBlock_NoVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 5 (EmitLog blocks) = 9
@@ -270,7 +273,7 @@ func TestInitProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Create VRF request block and a fulfillment block
@@ -281,10 +284,10 @@ func TestInitProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()
_, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID, preSeed, big.NewInt(10), true)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
n := 5
@@ -293,7 +296,7 @@ func TestInitProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db.
@@ -327,7 +330,7 @@ func TestInitProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Make a VRF request without fulfilling it
@@ -338,17 +341,17 @@ func TestInitProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
n := 5
- th.Client.Commit()
+ th.Backend.Commit()
for i := 0; i < n; i++ {
_, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db.
@@ -381,7 +384,7 @@ func TestInitProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Emit some logs in blocks with VRF reqs interspersed
@@ -392,7 +395,7 @@ func TestInitProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()

// Create 2 blocks with VRF requests in each iteration
keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes())
@@ -402,13 +405,13 @@ func TestInitProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

reqID2 := big.NewInt(int64(2*i + 1))
_, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db.
@@ -443,7 +446,7 @@ func TestInitProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Emit some logs in blocks with VRF reqs interspersed
@@ -454,7 +457,7 @@ func TestInitProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()

// Create 2 blocks with VRF requests in each iteration and fulfill one
// of them. This creates a mixed workload of fulfilled and unfulfilled
@@ -466,7 +469,7 @@ func TestInitProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

reqID2 := big.NewInt(int64(2*i + 1))
_, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
@@ -475,7 +478,7 @@ func TestInitProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {

_, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID1, preSeed, big.NewInt(10), true)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db.
@@ -518,7 +521,7 @@ func TestUpdateLastProcessedBlock_NoVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Create VRF request logs
@@ -530,13 +533,13 @@ func TestUpdateLastProcessedBlock_NoVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

reqID2 := big.NewInt(int64(2))
_, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
n := 5
@@ -545,7 +548,7 @@ func TestUpdateLastProcessedBlock_NoVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 2 (VRF req blocks) + 5 (EmitLog blocks) = 11
@@ -575,7 +578,7 @@ func TestUpdateLastProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Create VRF request log block with a fulfillment log block
@@ -587,11 +590,11 @@ func TestUpdateLastProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

_, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID1, preSeed, big.NewInt(10), true)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
n := 5
@@ -600,7 +603,7 @@ func TestUpdateLastProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 2 (VRF req/resp blocks) + 5 (EmitLog blocks) = 11
@@ -630,7 +633,7 @@ func TestUpdateLastProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Create VRF request logs without a fulfillment log block
@@ -642,7 +645,7 @@ func TestUpdateLastProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
n := 5
@@ -651,7 +654,7 @@ func TestUpdateLastProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 1 (VRF req block) + 5 (EmitLog blocks) = 10
@@ -681,7 +684,7 @@ func TestUpdateLastProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
@@ -691,7 +694,7 @@ func TestUpdateLastProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()

// Create 2 blocks with VRF requests in each iteration
keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes())
@@ -702,13 +705,13 @@ func TestUpdateLastProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

reqID2 := big.NewInt(int64(2*i + 1))
_, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 3*5 (EmitLog + VRF req blocks) = 19
@@ -738,7 +741,7 @@ func TestUpdateLastProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {

// Block 3 to finalityDepth. Ensure we have finality number of blocks
for i := 1; i < int(finalityDepth); i++ {
- th.Client.Commit()
+ th.Backend.Commit()
}

// Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock
@@ -748,7 +751,7 @@ func TestUpdateLastProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {
require.NoError(t, err1)
_, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))})
require.NoError(t, err1)
- th.Client.Commit()
+ th.Backend.Commit()

// Create 2 blocks with VRF requests in each iteration and fulfill one
// of them. This creates a mixed workload of fulfilled and unfulfilled
@@ -761,15 +764,15 @@ func TestUpdateLastProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) {
_, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()

reqID2 := big.NewInt(int64(2*i + 1))
_, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner,
keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From)
require.NoError(t, err2)
_, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID1, preSeed, big.NewInt(10), true)
require.NoError(t, err2)
- th.Client.Commit()
+ th.Backend.Commit()
}

// Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 3*5 (EmitLog + VRF req blocks) = 19
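
The block-timestamp adjustment in setupVRFLogPollerListenerTH is the subtlest piece of the setup touched by this migration, so here it is distilled into a standalone sketch against the new API. The package and helper names are illustrative, and the header time is converted with time.Unix because header timestamps are Unix seconds:

package vrfv2testsketch

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

// rewindFirstBlockAge advances the simulated chain's wall clock so that the
// current head block looks roughly `age` old instead of dating from the
// 1970-01-01 default that the simulated backend starts with.
func rewindFirstBlockAge(ctx context.Context, backend *simulated.Backend, age time.Duration) error {
	head, err := backend.Client().HeaderByNumber(ctx, nil) // nil selects the latest header
	if err != nil {
		return err
	}
	blockTime := time.Unix(int64(head.Time), 0)

	// Shift the clock so that time.Now() minus age lines up with the head block.
	if err := backend.AdjustTime(time.Since(blockTime) - age); err != nil {
		return err
	}
	backend.Commit() // the adjusted timestamp takes effect on the next mined block
	return nil
}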
