diff --git a/.golangci.yml b/.golangci.yml index d048104fb..0fe242313 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,11 @@ linters: - disable-all: true + disable-all: false enable: - - errcheck - - govet + # Finds repeated strings that could be made constants + - goconst + # Ensures that the code was formatted with `gofmt -s` + - gofmt + # Identifies commonly misspelled words + - misspell + # Identifies unused function parameters + - unparam diff --git a/Makefile b/Makefile index 6e8061b6c..7e7982503 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,11 @@ deps-ts-no-lockfile: yarn install --frozen-lockfile +# Provides a pre-commit convenience command that runs all of the tests and the linters +.PHONY: check +check: test-all lint + + .PHONY: test-all test-all: test-go test-wasm-browser test-ts test-browser-conversion test-browser-integration diff --git a/cmd/cut-release/main.go b/cmd/cut-release/main.go index d489d7bc6..a8e2802a0 100644 --- a/cmd/cut-release/main.go +++ b/cmd/cut-release/main.go @@ -92,6 +92,12 @@ func generateTypescriptDocs() { } } +const ( + captureVersionString = `"version": "(.*)"` + captureMeshBrowserVersionString = `"@0x/mesh-browser": "(.*)"` + captureMeshBrowserLiteVersionString = `"@0x/mesh-browser-lite": "(.*)"` +) + // Update the version string in all files that must be updated for a new release func updateHardCodedVersions(version string) { newVersionString := fmt.Sprintf(`"version": "%s"`, version) @@ -100,68 +106,56 @@ func updateHardCodedVersions(version string) { // Update `packages/mesh-graphql-client/package.json` tsClientPackageJSONPath := "packages/mesh-graphql-client/package.json" - regex := `"version": "(.*)"` - updateFileWithRegex(tsClientPackageJSONPath, regex, newVersionString) + updateFileWithRegex(tsClientPackageJSONPath, captureVersionString, newVersionString) // Update `packages/mesh-browser-lite/package.json` browserLitePackageJSONPath := "packages/mesh-browser-lite/package.json" - regex = `"version": "(.*)"` - updateFileWithRegex(browserLitePackageJSONPath, regex, newVersionString) + updateFileWithRegex(browserLitePackageJSONPath, captureVersionString, newVersionString) // Update `packages/mesh-browser/package.json` browserPackageJSONPath := "packages/mesh-browser/package.json" - regex = `"version": "(.*)"` - updateFileWithRegex(browserPackageJSONPath, regex, newVersionString) + updateFileWithRegex(browserPackageJSONPath, captureVersionString, newVersionString) // NOTE(jalextowle): `@0x/mesh-browser` uses the local version of `@0x/mesh-browser-lite` // on the `development` branch. Once the `@0x/mesh-browser-lite` package has been published, // we need to update dependency in `@0x/mesh-browser` to published version. 
- regex = `"@0x/mesh-browser-lite": "(.*)"` - updateFileWithRegex(browserPackageJSONPath, regex, newBrowserLiteDependencyString) + updateFileWithRegex(browserPackageJSONPath, captureMeshBrowserLiteVersionString, newBrowserLiteDependencyString) // Update `packages/mesh-webpack-example-lite/package.json` webpackExampleLitePackageJSONPath := "packages/mesh-webpack-example-lite/package.json" - regex = `"@0x/mesh-browser-lite": "(.*)"` - updateFileWithRegex(webpackExampleLitePackageJSONPath, regex, newBrowserLiteDependencyString) + updateFileWithRegex(webpackExampleLitePackageJSONPath, captureMeshBrowserLiteVersionString, newBrowserLiteDependencyString) // Update `packages/mesh-webpack-example/package.json` webpackExamplePackageJSONPath := "packages/mesh-webpack-example/package.json" - regex = `"@0x/mesh-browser": "(.*)"` - updateFileWithRegex(webpackExamplePackageJSONPath, regex, newBrowserDependencyString) + updateFileWithRegex(webpackExamplePackageJSONPath, captureMeshBrowserVersionString, newBrowserDependencyString) // Update `packages/mesh-integration-tests/package.json` integrationTestsPackageJSONPath := "packages/mesh-integration-tests/package.json" - regex = `"@0x/mesh-browser": "(.*)"` - updateFileWithRegex(integrationTestsPackageJSONPath, regex, newBrowserDependencyString) + updateFileWithRegex(integrationTestsPackageJSONPath, captureMeshBrowserVersionString, newBrowserDependencyString) // Update `packages/mesh-browser-shim/package.json` testWasmPackageJSONPath := "packages/mesh-browser-shim/package.json" - regex = `"@0x/mesh-browser-lite": "(.*)"` - updateFileWithRegex(testWasmPackageJSONPath, regex, newBrowserLiteDependencyString) + updateFileWithRegex(testWasmPackageJSONPath, captureMeshBrowserLiteVersionString, newBrowserLiteDependencyString) // Update `core.go` corePath := "core/core.go" newVersionString = fmt.Sprintf(`version$1= "%s"`, version) - regex = `version(.*)= "(.*)"` - updateFileWithRegex(corePath, regex, newVersionString) + updateFileWithRegex(corePath, `version(.*)= "(.*)"`, newVersionString) // Update `docs/deployment_with_telemetry.md` newVersionString = fmt.Sprintf(`image: 0xorg/mesh:%s`, version) - regex = `image: 0xorg/mesh:[0-9.]+.*` - updateFileWithRegex("docs/deployment_with_telemetry.md", regex, newVersionString) + updateFileWithRegex("docs/deployment_with_telemetry.md", `image: 0xorg/mesh:[0-9.]+.*`, newVersionString) // Update `CHANGELOG.md` changelog := "CHANGELOG.md" newChangelogSection := fmt.Sprintf(`## v%s`, version) - regex = `(## Upcoming release)` - updateFileWithRegex(changelog, regex, newChangelogSection) + updateFileWithRegex(changelog, `(## Upcoming release)`, newChangelogSection) // Update badge in README.md pathToMDFilesWithBadges := []string{"README.md", "docs/graphql_api.md", "docs/deployment.md", "docs/deployment_with_telemetry.md"} doubleDashVersion := strings.Replace(version, "-", "--", -1) newSvgName := fmt.Sprintf("version-%s-orange.svg", doubleDashVersion) - regex = `version-(.*)-orange.svg` for _, path := range pathToMDFilesWithBadges { - updateFileWithRegex(path, regex, newSvgName) + updateFileWithRegex(path, `version-(.*)-orange.svg`, newSvgName) } } @@ -194,19 +188,3 @@ func getDocsCommitHash(docsPath string) (string, error) { } return matches[1], nil } - -func getFileContentsWithRegex(filePath string, regex string) (string, error) { - dat, err := ioutil.ReadFile(filePath) - if err != nil { - log.Fatal(err) - } - - var re = regexp.MustCompile(regex) - matches := re.FindAllStringSubmatch(string(dat), -1) - - if len(matches) < 1 || 
len(matches[0]) < 3 { - return "", errors.New("No contents found") - } - - return matches[0][2], nil -} diff --git a/cmd/mesh-bootstrap/main.go b/cmd/mesh-bootstrap/main.go index fffe209a2..5bb9a9a61 100644 --- a/cmd/mesh-bootstrap/main.go +++ b/cmd/mesh-bootstrap/main.go @@ -8,14 +8,12 @@ package main import ( "context" "fmt" - mathrand "math/rand" "strings" "time" "github.com/0xProject/0x-mesh/loghooks" "github.com/0xProject/0x-mesh/p2p" "github.com/0xProject/0x-mesh/p2p/banner" - "github.com/ipfs/go-datastore" leveldbStore "github.com/ipfs/go-ds-leveldb" libp2p "github.com/libp2p/go-libp2p" autonat "github.com/libp2p/go-libp2p-autonat-svc" @@ -40,23 +38,6 @@ const ( // peerGraceDuration is the amount of time a newly opened connection is given // before it becomes subject to pruning. peerGraceDuration = 10 * time.Second - // defaultNetworkTimeout is the default timeout for network requests (e.g. - // connecting to a new peer). - defaultNetworkTimeout = 30 * time.Second - // checkBandwidthLoopInterval is how often to potentially check bandwidth usage - // for peers. - checkBandwidthLoopInterval = 5 * time.Second - // chanceToCheckBandwidthUsage is the approximate ratio of (number of check - // bandwidth loop iterations in which we check bandwidth usage) to (total - // number of check bandwidth loop iterations). We check bandwidth - // non-deterministically in order to prevent spammers from avoiding detection - // by carefully timing their bandwidth usage. So on each iteration of the - // check bandwidth loop we generate a number between 0 and 1. If its less than - // chanceToCheckBandiwdthUsage, we perform a bandwidth check. - chanceToCheckBandwidthUsage = 0.1 - // DataStoreType constants - leveldbDataStore = "leveldb" - sqlDataStore = "sqldb" ) // Config contains configuration options for a Node. @@ -135,9 +116,7 @@ func main() { // We need to declare the newDHT function ahead of time so we can use it in // the libp2p.Routing option. var kadDHT *dht.IpfsDHT - var newDHT func(h host.Host) (routing.PeerRouting, error) - - newDHT = func(h host.Host) (routing.PeerRouting, error) { + newDHT := func(h host.Host) (routing.PeerRouting, error) { var err error dhtDir := getDHTDir(config) // Set up the DHT to use LevelDB. @@ -309,24 +288,3 @@ func parseAddrs(commaSeparatedAddrs string) ([]ma.Multiaddr, error) { } return maddrs, nil } - -func continuoslyCheckBandwidth(ctx context.Context, banner *banner.Banner) error { - ticker := time.NewTicker(checkBandwidthLoopInterval) - for { - select { - case <-ctx.Done(): - return nil - case <-ticker.C: - // Check bandwidth usage non-deterministically - if mathrand.Float64() <= chanceToCheckBandwidthUsage { - banner.CheckBandwidthUsage() - } - } - } -} - -// NewDHTWithDatastore returns a new Kademlia DHT instance configured with store -// as the persistant storage interface. 
-func NewDHTWithDatastore(ctx context.Context, store datastore.Batching, host host.Host) (*dht.IpfsDHT, error) { - return dht.New(ctx, host, dhtopts.Datastore(store), dhtopts.Protocols(p2p.DHTProtocolID)) -} diff --git a/cmd/mesh-bootstrap/storage.go b/cmd/mesh-bootstrap/storage.go index 29f04571a..1fbdddca9 100644 --- a/cmd/mesh-bootstrap/storage.go +++ b/cmd/mesh-bootstrap/storage.go @@ -13,11 +13,6 @@ import ( _ "github.com/lib/pq" // postgres driver ) -const ( - dhtTableName = "dhtkv" - peerStoreTableName = "peerStore" -) - func getPrivateKeyPath(config Config) string { return filepath.Join(config.LevelDBDataDir, "keys", "privkey") } diff --git a/cmd/mesh/graphql_server.go b/cmd/mesh/graphql_server.go index 842c9d648..7c8dbd5b5 100644 --- a/cmd/mesh/graphql_server.go +++ b/cmd/mesh/graphql_server.go @@ -35,12 +35,10 @@ func serveGraphQL(ctx context.Context, app *core.App, addr string, enableGraphiQ // Start the server server := &http.Server{Addr: addr, Handler: handler} go func() { - select { - case <-ctx.Done(): - shutdownContext, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout) - defer cancel() - _ = server.Shutdown(shutdownContext) - } + <-ctx.Done() + shutdownContext, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout) + defer cancel() + _ = server.Shutdown(shutdownContext) }() return server.ListenAndServe() } diff --git a/cmd/peer-id-to-pub-key/main.go b/cmd/peer-id-to-pub-key/main.go index 996b09ee7..3f4d2f5b8 100644 --- a/cmd/peer-id-to-pub-key/main.go +++ b/cmd/peer-id-to-pub-key/main.go @@ -14,7 +14,7 @@ func main() { log.Fatal("expects exactly one argument") } peerIDString := os.Args[1] - peerID, err := peer.IDB58Decode(peerIDString) + peerID, err := peer.Decode(peerIDString) if err != nil { log.Fatal(err) } diff --git a/common/types/types.go b/common/types/types.go index 89b7b6f55..389ea9cc5 100644 --- a/common/types/types.go +++ b/common/types/types.go @@ -10,6 +10,7 @@ import ( "github.com/0xProject/0x-mesh/zeroex" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" ) @@ -197,5 +198,5 @@ func BytesToHex(b []byte) string { if len(b) == 0 { return "0x" } - return common.ToHex(b) + return hexutil.Encode(b) } diff --git a/core/core.go b/core/core.go index 44c17766f..90ef9e76e 100644 --- a/core/core.go +++ b/core/core.go @@ -37,8 +37,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rpc" p2pcrypto "github.com/libp2p/go-libp2p-core/crypto" - peer "github.com/libp2p/go-libp2p-core/peer" - peerstore "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-core/peer" ma "github.com/multiformats/go-multiaddr" log "github.com/sirupsen/logrus" ) @@ -48,7 +47,6 @@ const ( ethereumRPCRequestTimeout = 30 * time.Second peerConnectTimeout = 60 * time.Second checkNewAddrInterval = 20 * time.Second - expirationPollingInterval = 50 * time.Millisecond rateLimiterCheckpointInterval = 1 * time.Minute // estimatedNonPollingEthereumRPCRequestsPer24Hrs is an estimate of the // minimum number of RPC requests Mesh needs to send (not including block @@ -285,14 +283,14 @@ func newWithPrivateConfig(ctx context.Context, config Config, pConfig privateCon } // Initialize metadata and check stored chain id (if any). 
- _, err = initMetadata(config.EthereumChainID, database) + err = initMetadata(config.EthereumChainID, database) if err != nil { return nil, err } // Initialize ETH JSON-RPC RateLimiter var ethRPCRateLimiter ratelimit.RateLimiter - if config.EnableEthereumRPCRateLimiting == false { + if !config.EnableEthereumRPCRateLimiting { ethRPCRateLimiter = ratelimit.NewUnlimited() } else { clock := clock.New() @@ -460,7 +458,7 @@ func initPrivateKey(path string) (p2pcrypto.PrivKey, error) { return nil, err } -func initMetadata(chainID int, database *db.DB) (*types.Metadata, error) { +func initMetadata(chainID int, database *db.DB) error { metadata, err := database.GetMetadata() if err != nil { if err == db.ErrNotFound { @@ -469,20 +467,20 @@ func initMetadata(chainID int, database *db.DB) (*types.Metadata, error) { EthereumChainID: chainID, } if err := database.SaveMetadata(metadata); err != nil { - return nil, err + return err } - return metadata, nil + return nil } - return nil, err + return err } // on subsequent startups, verify we are on the same chain if metadata.EthereumChainID != chainID { err := fmt.Errorf("expected chainID to be %d but got %d", metadata.EthereumChainID, chainID) log.WithError(err).Error("Mesh previously started on different Ethereum chain; switch chainId or remove DB") - return nil, err + return err } - return metadata, nil + return nil } func (app *App) Start() error { @@ -951,12 +949,8 @@ func (app *App) AddOrdersRaw(ctx context.Context, signedOrdersRaw []*json.RawMes return nil, err } - for _, orderInfo := range validationResults.Accepted { - allValidationResults.Accepted = append(allValidationResults.Accepted, orderInfo) - } - for _, orderInfo := range validationResults.Rejected { - allValidationResults.Rejected = append(allValidationResults.Rejected, orderInfo) - } + allValidationResults.Accepted = append(allValidationResults.Accepted, validationResults.Accepted...) + allValidationResults.Rejected = append(allValidationResults.Rejected, validationResults.Rejected...) for _, acceptedOrderInfo := range allValidationResults.Accepted { // If the order isn't new, we don't add to OrderWatcher, log it's receipt @@ -990,7 +984,7 @@ func (app *App) shareOrder(order *zeroex.SignedOrder) error { } // AddPeer can be used to manually connect to a new peer. -func (app *App) AddPeer(peerInfo peerstore.PeerInfo) error { +func (app *App) AddPeer(peerInfo peer.AddrInfo) error { <-app.started return app.node.Connect(peerInfo, peerConnectTimeout) diff --git a/core/core_test.go b/core/core_test.go index 45edb92b8..3acb61821 100644 --- a/core/core_test.go +++ b/core/core_test.go @@ -30,8 +30,6 @@ const ( // blockProcessingWaitTime is the amount of time to wait for Mesh to process // new blocks that have been mined. blockProcessingWaitTime = 1 * time.Second - // ordersyncWaitTime is the amount of time to wait for ordersync to run. 
- ordersyncWaitTime = 2 * time.Second ) func TestEthereumChainDetection(t *testing.T) { @@ -41,15 +39,15 @@ func TestEthereumChainDetection(t *testing.T) { require.NoError(t, err) // simulate starting up on mainnet - _, err = initMetadata(1, database) + err = initMetadata(1, database) require.NoError(t, err) // simulate restart on same chain - _, err = initMetadata(1, database) + err = initMetadata(1, database) require.NoError(t, err) // should error when attempting to start on different chain - _, err = initMetadata(2, database) + err = initMetadata(2, database) assert.Error(t, err) } @@ -94,10 +92,6 @@ func TestConfigChainIDAndRPCMatchDetection(t *testing.T) { wg.Wait() } -func newTestApp(t *testing.T, ctx context.Context) *App { - return newTestAppWithPrivateConfig(t, ctx, defaultOrderFilter, defaultPrivateConfig()) -} - func newTestAppWithPrivateConfig(t *testing.T, ctx context.Context, customOrderFilter string, pConfig privateConfig) *App { if customOrderFilter == "" { customOrderFilter = defaultOrderFilter diff --git a/core/ordersync/ordersync.go b/core/ordersync/ordersync.go index 99fdf1bfe..2ca9ca394 100644 --- a/core/ordersync/ordersync.go +++ b/core/ordersync/ordersync.go @@ -19,8 +19,8 @@ import ( "github.com/albrow/stringset" "github.com/jpillora/backoff" network "github.com/libp2p/go-libp2p-core/network" + peer "github.com/libp2p/go-libp2p-core/peer" protocol "github.com/libp2p/go-libp2p-core/protocol" - peer "github.com/libp2p/go-libp2p-peer" log "github.com/sirupsen/logrus" "golang.org/x/time/rate" ) @@ -299,7 +299,7 @@ func (s *Service) GetOrders(ctx context.Context, minPeers int) error { m.RLock() successfullySyncedPeerLength := len(successfullySyncedPeers) successfullySynced := successfullySyncedPeers.Contains(peerID.Pretty()) - nextRequest, _ := nextRequestForPeer[peerID] + nextRequest := nextRequestForPeer[peerID] m.RUnlock() if successfullySyncedPeerLength >= minPeers { return nil @@ -331,7 +331,7 @@ func (s *Service) GetOrders(ctx context.Context, minPeers int) error { } else { log.WithFields(log.Fields{ "provider": id.Pretty(), - }).Trace("succesfully got orders from peer via ordersync") + }).Trace("successfully got orders from peer via ordersync") m.Lock() successfullySyncedPeers.Add(id.Pretty()) delete(nextRequestForPeer, id) @@ -491,7 +491,7 @@ type FirstRequestsForSubprotocols struct { func (s *Service) createFirstRequestForAllSubprotocols() (*rawRequest, error) { metadata := []json.RawMessage{} for _, sid := range s.preferredSubprotocols { - subp, _ := s.subprotocolSet[sid] + subp := s.subprotocolSet[sid] m, err := subp.GenerateFirstRequestMetadata() if err != nil { return nil, err diff --git a/core/ordersync/ordersync_test.go b/core/ordersync/ordersync_test.go index c091c853e..aa521ebe3 100644 --- a/core/ordersync/ordersync_test.go +++ b/core/ordersync/ordersync_test.go @@ -13,7 +13,7 @@ import ( "github.com/0xProject/0x-mesh/p2p" "github.com/0xProject/0x-mesh/scenario" "github.com/0xProject/0x-mesh/zeroex" - peer "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -58,7 +58,7 @@ func TestHandleRawRequest(t *testing.T) { // This request has multiple subprotocols included and nil metadata. This // has the same structure as requests that would have been sent by older // versions of Mesh, and allows us to test that newer Mesh nodes provide - // backwards compatability as ordersync providers. + // backwards compatibility as ordersync providers. 
res := s.handleRawRequest(rawReq, n.ID()) require.NotNil(t, res) assert.True(t, res.Complete) @@ -70,11 +70,13 @@ func TestHandleRawRequest(t *testing.T) { // object. var metadata oneOrderSubprotocolRequestMetadata err = json.Unmarshal(res.Metadata, &metadata) + require.NoError(t, err) assert.Equal(t, oneOrderSubprotocolRequestMetadata{}, metadata) // Test handling a request from a node that is using the new first request // encoding scheme. rawReq, err = s.createFirstRequestForAllSubprotocols() + require.NoError(t, err) res = s.handleRawRequest(rawReq, n.ID()) require.NotNil(t, res) assert.True(t, res.Complete) diff --git a/db/common.go b/db/common.go index dbdfd24a1..4e110bf52 100644 --- a/db/common.go +++ b/db/common.go @@ -16,10 +16,6 @@ import ( ) const ( - // The default miniHeaderRetentionLimit used by Mesh. This default only gets overwritten in tests. - defaultMiniHeaderRetentionLimit = 20 - // The maximum MiniHeaders to query per page when deleting MiniHeaders - miniHeadersMaxPerPage = 1000 // The amount of time to wait before timing out when connecting to the database for the first time. connectTimeout = 10 * time.Second ) @@ -391,14 +387,5 @@ func checkOrderQuery(query *OrderQuery) error { return errors.New("can't use Offset without Limit") } return nil -} -func checkMiniHeaderQuery(query *MiniHeaderQuery) error { - if query == nil { - return nil - } - if query.Offset != 0 && query.Limit == 0 { - return errors.New("can't use Offset without Limit") - } - return nil } diff --git a/db/db_test.go b/db/db_test.go index 15ec94d5a..0c0839907 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -124,6 +124,7 @@ func TestAddOrdersMaxExpirationTime(t *testing.T) { copy(expectedStoredOrders, originalOrders) expectedStoredOrders[len(expectedStoredOrders)-1] = orderWithShorterExpirationTime actualStoredOrders, err := db.FindOrders(nil) + require.NoError(t, err) assertOrderSlicesAreUnsortedEqual(t, expectedStoredOrders, actualStoredOrders) // Add some pinned orders. Pinned orders should replace non-pinned orders, even if @@ -164,6 +165,7 @@ func TestAddOrdersMaxExpirationTime(t *testing.T) { copy(expectedStoredOrders, pinnedOrders) expectedStoredOrders[len(expectedStoredOrders)-1] = pinnedOrderWithShorterExpirationTime actualStoredOrders, err = db.FindOrders(nil) + require.NoError(t, err) assertOrderSlicesAreUnsortedEqual(t, expectedStoredOrders, actualStoredOrders) // Try to re-add the original (non-pinned) orders. Non-pinned orders should never replace pinned orders. 
@@ -176,6 +178,7 @@ func TestAddOrdersMaxExpirationTime(t *testing.T) { // Check that the orders stored in the database are the same as before (only // pinned orders with the shortest expiration time) actualStoredOrders, err = db.FindOrders(nil) + require.NoError(t, err) assertOrderSlicesAreUnsortedEqual(t, expectedStoredOrders, actualStoredOrders) } @@ -396,7 +399,7 @@ func TestFindOrdersSort(t *testing.T) { } for i, testCase := range testCases { testCaseName := fmt.Sprintf("test case %d", i) - t.Run(testCaseName, runFindOrdersSortTestCase(t, db, originalOrders, testCase)) + t.Run(testCaseName, runFindOrdersSortTestCase(db, originalOrders, testCase)) } } @@ -405,7 +408,7 @@ type findOrdersSortTestCase struct { less func([]*types.OrderWithMetadata) func(i, j int) bool } -func runFindOrdersSortTestCase(t *testing.T, db *DB, originalOrders []*types.OrderWithMetadata, testCase findOrdersSortTestCase) func(t *testing.T) { +func runFindOrdersSortTestCase(db *DB, originalOrders []*types.OrderWithMetadata, testCase findOrdersSortTestCase) func(t *testing.T) { return func(t *testing.T) { expectedOrders := make([]*types.OrderWithMetadata, len(originalOrders)) copy(expectedOrders, originalOrders) @@ -467,7 +470,7 @@ func TestFindOrdersLimitAndOffset(t *testing.T) { } for i, testCase := range testCases { testCaseName := fmt.Sprintf("test case %d", i) - t.Run(testCaseName, runFindOrdersLimitAndOffsetTestCase(t, db, originalOrders, testCase)) + t.Run(testCaseName, runFindOrdersLimitAndOffsetTestCase(db, testCase)) } } @@ -478,7 +481,7 @@ type findOrdersLimitAndOffsetTestCase struct { expectedError string } -func runFindOrdersLimitAndOffsetTestCase(t *testing.T, db *DB, originalOrders []*types.OrderWithMetadata, testCase findOrdersLimitAndOffsetTestCase) func(t *testing.T) { +func runFindOrdersLimitAndOffsetTestCase(db *DB, testCase findOrdersLimitAndOffsetTestCase) func(t *testing.T) { return func(t *testing.T) { findOpts := &OrderQuery{ Sort: []OrderSort{ @@ -510,7 +513,7 @@ func TestFindOrdersFilter(t *testing.T) { for i, testCase := range testCases { testCaseName := fmt.Sprintf("%s (test case %d)", testCase.name, i) - t.Run(testCaseName, runFindOrdersFilterTestCase(t, db, testCase)) + t.Run(testCaseName, runFindOrdersFilterTestCase(db, testCase)) } } @@ -543,7 +546,7 @@ func TestFindOrdersFilterSortLimitAndOffset(t *testing.T) { assertOrderSlicesAreEqual(t, expectedOrders, actualOrders) } -func runFindOrdersFilterTestCase(t *testing.T, db *DB, testCase orderFilterTestCase) func(t *testing.T) { +func runFindOrdersFilterTestCase(db *DB, testCase orderFilterTestCase) func(t *testing.T) { return func(t *testing.T) { findOpts := &OrderQuery{ Filters: testCase.filters, @@ -562,11 +565,11 @@ func TestCountOrdersFilter(t *testing.T) { for i, testCase := range testCases { testCaseName := fmt.Sprintf("%s (test case %d)", testCase.name, i) - t.Run(testCaseName, runCountOrdersFilterTestCase(t, db, testCase)) + t.Run(testCaseName, runCountOrdersFilterTestCase(db, testCase)) } } -func runCountOrdersFilterTestCase(t *testing.T, db *DB, testCase orderFilterTestCase) func(t *testing.T) { +func runCountOrdersFilterTestCase(db *DB, testCase orderFilterTestCase) func(t *testing.T) { return func(t *testing.T) { opts := &OrderQuery{ Filters: testCase.filters, @@ -645,11 +648,11 @@ func TestDeleteOrdersFilter(t *testing.T) { storedOrders, testCases := makeOrderFilterTestCases(t, db) for i, testCase := range testCases { testCaseName := fmt.Sprintf("%s (test case %d)", testCase.name, i) - t.Run(testCaseName, 
runDeleteOrdersFilterTestCase(t, db, storedOrders, testCase)) + t.Run(testCaseName, runDeleteOrdersFilterTestCase(db, storedOrders, testCase)) } } -func runDeleteOrdersFilterTestCase(t *testing.T, db *DB, originalOrders []*types.OrderWithMetadata, testCase orderFilterTestCase) func(t *testing.T) { +func runDeleteOrdersFilterTestCase(db *DB, originalOrders []*types.OrderWithMetadata, testCase orderFilterTestCase) func(t *testing.T) { return func(t *testing.T) { defer func() { // After each case, reset the state of the database by re-adding the original orders. @@ -859,7 +862,7 @@ func TestFindMiniHeadersSort(t *testing.T) { } for i, testCase := range testCases { testCaseName := fmt.Sprintf("test case %d", i) - t.Run(testCaseName, runFindMiniHeadersSortTestCase(t, db, originalMiniHeaders, testCase)) + t.Run(testCaseName, runFindMiniHeadersSortTestCase(db, originalMiniHeaders, testCase)) } } @@ -868,7 +871,7 @@ type findMiniHeadersSortTestCase struct { less func([]*types.MiniHeader) func(i, j int) bool } -func runFindMiniHeadersSortTestCase(t *testing.T, db *DB, originalMiniHeaders []*types.MiniHeader, testCase findMiniHeadersSortTestCase) func(t *testing.T) { +func runFindMiniHeadersSortTestCase(db *DB, originalMiniHeaders []*types.MiniHeader, testCase findMiniHeadersSortTestCase) func(t *testing.T) { return func(t *testing.T) { expectedMiniHeaders := make([]*types.MiniHeader, len(originalMiniHeaders)) copy(expectedMiniHeaders, originalMiniHeaders) @@ -930,7 +933,7 @@ func TestFindMiniHeadersLimitAndOffset(t *testing.T) { } for i, testCase := range testCases { testCaseName := fmt.Sprintf("test case %d", i) - t.Run(testCaseName, runFindMiniHeadersLimitAndOffsetTestCase(t, db, originalMiniHeaders, testCase)) + t.Run(testCaseName, runFindMiniHeadersLimitAndOffsetTestCase(db, testCase)) } } @@ -941,7 +944,7 @@ type findMiniHeadersLimitAndOffsetTestCase struct { expectedError string } -func runFindMiniHeadersLimitAndOffsetTestCase(t *testing.T, db *DB, originalMiniHeaders []*types.MiniHeader, testCase findMiniHeadersLimitAndOffsetTestCase) func(t *testing.T) { +func runFindMiniHeadersLimitAndOffsetTestCase(db *DB, testCase findMiniHeadersLimitAndOffsetTestCase) func(t *testing.T) { return func(t *testing.T) { findOpts := &MiniHeaderQuery{ Sort: []MiniHeaderSort{ @@ -973,11 +976,11 @@ func TestFindMiniHeadersFilter(t *testing.T) { _, testCases := makeMiniHeaderFilterTestCases(t, db) for i, testCase := range testCases { testCaseName := fmt.Sprintf("%s (test case %d)", testCase.name, i) - t.Run(testCaseName, runFindMiniHeadersFilterTestCase(t, db, testCase)) + t.Run(testCaseName, runFindMiniHeadersFilterTestCase(db, testCase)) } } -func runFindMiniHeadersFilterTestCase(t *testing.T, db *DB, testCase miniHeaderFilterTestCase) func(t *testing.T) { +func runFindMiniHeadersFilterTestCase(db *DB, testCase miniHeaderFilterTestCase) func(t *testing.T) { return func(t *testing.T) { findOpts := &MiniHeaderQuery{ Filters: testCase.filters, @@ -1056,11 +1059,11 @@ func TestDeleteMiniHeadersFilter(t *testing.T) { for i, testCase := range testCases { testCaseName := fmt.Sprintf("%s (test case %d)", testCase.name, i) - t.Run(testCaseName, runDeleteMiniHeadersFilterTestCase(t, db, storedMiniHeaders, testCase)) + t.Run(testCaseName, runDeleteMiniHeadersFilterTestCase(db, storedMiniHeaders, testCase)) } } -func runDeleteMiniHeadersFilterTestCase(t *testing.T, db *DB, storedMiniHeaders []*types.MiniHeader, testCase miniHeaderFilterTestCase) func(t *testing.T) { +func runDeleteMiniHeadersFilterTestCase(db *DB, 
storedMiniHeaders []*types.MiniHeader, testCase miniHeaderFilterTestCase) func(t *testing.T) { return func(t *testing.T) { defer func() { // After each case, reset the state of the database by re-adding the original miniHeaders. @@ -1148,6 +1151,7 @@ func TestUpdateMetadata(t *testing.T) { updatedMetadata.EthRPCRequestsSentInCurrentUTCDay = updatedETHRPCRequests return updatedMetadata }) + require.NoError(t, err) expectedMetadata := originalMetadata expectedMetadata.EthRPCRequestsSentInCurrentUTCDay = updatedETHRPCRequests diff --git a/db/dexie_implementation.go b/db/dexie_implementation.go index f251a4b7d..cb4651248 100644 --- a/db/dexie_implementation.go +++ b/db/dexie_implementation.go @@ -550,6 +550,16 @@ func assetDataIncludesTokenAddressAndTokenID(field OrderField, tokenAddress comm } } +func checkMiniHeaderQuery(query *MiniHeaderQuery) error { + if query == nil { + return nil + } + if query.Offset != 0 && query.Limit == 0 { + return errors.New("can't use Offset without Limit") + } + return nil +} + func logQueryIfSlow(start time.Time, msg string) { duration := time.Since(start) if duration > slowQueryDebugDuration { diff --git a/db/sql_implementation.go b/db/sql_implementation.go index a9d223b8f..7205c964d 100644 --- a/db/sql_implementation.go +++ b/db/sql_implementation.go @@ -80,10 +80,8 @@ func New(ctx context.Context, opts *Options) (*DB, error) { // Automatically close the database connection when the context is canceled. go func() { - select { - case <-ctx.Done(): - _ = sqldb.Close() - } + <-ctx.Done() + _ = sqldb.Close() }() db := &DB{ diff --git a/db/sqltypes/sqltypes.go b/db/sqltypes/sqltypes.go index b378a88c1..05b53843c 100644 --- a/db/sqltypes/sqltypes.go +++ b/db/sqltypes/sqltypes.go @@ -206,7 +206,7 @@ func (s *ParsedAssetData) Value() (driver.Value, error) { func (s *ParsedAssetData) Scan(value interface{}) error { if value == nil { - s = nil + *s = nil return nil } switch v := value.(type) { diff --git a/ethereum/blockwatch/block_watcher.go b/ethereum/blockwatch/block_watcher.go index 4214a1425..bfa4ed83e 100644 --- a/ethereum/blockwatch/block_watcher.go +++ b/ethereum/blockwatch/block_watcher.go @@ -641,7 +641,10 @@ func (w *Watcher) filterLogsRecursively(from, to int, allLogs []ethtypes.Log) ([ return allLogs, fmt.Errorf("Unable to get the logs for block #%d, because it contains too many logs", from) } - endFirstHalf := from + numBlocks/2 + // FIXME(jalextowle): This had dead code previously. Was there something that should have + // been done here that wasn't done originally?
+ firstBatchSize := numBlocks / 2 + endFirstHalf := from + firstBatchSize startSecondHalf := endFirstHalf + 1 allLogs, err := w.filterLogsRecursively(from, endFirstHalf, allLogs) if err != nil { diff --git a/ethereum/blockwatch/block_watcher_test.go b/ethereum/blockwatch/block_watcher_test.go index 00aef360a..bcbc60a12 100644 --- a/ethereum/blockwatch/block_watcher_test.go +++ b/ethereum/blockwatch/block_watcher_test.go @@ -420,8 +420,8 @@ func TestFilterLogsRecursively(t *testing.T) { for _, testCase := range testCases { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - fakeLogClient, err := newFakeLogClient(testCase.rangeToFilterLogsResponse) - require.NoError(t, err) + + fakeLogClient := newFakeLogClient(testCase.rangeToFilterLogsResponse) watcher := setupOrderWatcher(t, ctx, fakeLogClient) logs, err := watcher.filterLogsRecursively(from, to, []ethtypes.Log{}) @@ -522,8 +522,8 @@ func TestGetLogsInBlockRange(t *testing.T) { for _, testCase := range testCases { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - fakeLogClient, err := newFakeLogClient(testCase.RangeToFilterLogsResponse) - require.NoError(t, err) + + fakeLogClient := newFakeLogClient(testCase.RangeToFilterLogsResponse) watcher := setupOrderWatcher(t, ctx, fakeLogClient) logs, furthestBlockProcessed := watcher.getLogsInBlockRange(testCase.From, testCase.To) diff --git a/ethereum/blockwatch/fake_log_client.go b/ethereum/blockwatch/fake_log_client.go index b180ee00a..865bfb819 100644 --- a/ethereum/blockwatch/fake_log_client.go +++ b/ethereum/blockwatch/fake_log_client.go @@ -28,8 +28,8 @@ type fakeLogClient struct { } // newFakeLogClient instantiates a fakeLogClient for testing log fetching -func newFakeLogClient(rangeToResponse map[string]filterLogsResponse) (*fakeLogClient, error) { - return &fakeLogClient{count: 0, rangeToResponse: rangeToResponse}, nil +func newFakeLogClient(rangeToResponse map[string]filterLogsResponse) *fakeLogClient { + return &fakeLogClient{count: 0, rangeToResponse: rangeToResponse} } // HeaderByNumber fetches a block header by its number diff --git a/ethereum/simplestack/simple_stack_test.go b/ethereum/simplestack/simple_stack_test.go index b80d1b1fd..4bcaa4768 100644 --- a/ethereum/simplestack/simple_stack_test.go +++ b/ethereum/simplestack/simple_stack_test.go @@ -11,8 +11,6 @@ import ( "github.com/stretchr/testify/require" ) -const limit = 10 - var ( miniHeaderOne = &types.MiniHeader{ Number: big.NewInt(1), @@ -184,7 +182,7 @@ func TestSimpleStackCheckpointThenReset(t *testing.T) { assert.Len(t, stack.miniHeaders, 1) assert.Len(t, stack.updates, 1) - checkpointID = stack.Checkpoint() + stack.Checkpoint() assert.Len(t, stack.miniHeaders, 1) assert.Len(t, stack.updates, 0) @@ -195,7 +193,7 @@ func TestSimpleStackCheckpointThenReset(t *testing.T) { assert.Len(t, stack.miniHeaders, 0) assert.Len(t, stack.updates, 1) - checkpointID = stack.Checkpoint() + stack.Checkpoint() assert.Len(t, stack.miniHeaders, 0) assert.Len(t, stack.updates, 0) diff --git a/integration-tests/browser_integration_test.go b/integration-tests/browser_integration_test.go index 0a366f26c..7d4e0cfe5 100644 --- a/integration-tests/browser_integration_test.go +++ b/integration-tests/browser_integration_test.go @@ -39,7 +39,7 @@ func TestBrowserIntegration(t *testing.T) { ctx, _ = chromedp.NewContext(ctx, chromedp.WithErrorf(t.Errorf)) defer cancel() - removeOldFiles(t, ctx) + removeOldFiles(t) buildForTests(t, ctx) // wg is a WaitGroup for the entire tests. 
We won't exit until wg is done. diff --git a/integration-tests/constants.go b/integration-tests/constants.go index cf67e3bdb..21f6361ba 100644 --- a/integration-tests/constants.go +++ b/integration-tests/constants.go @@ -16,7 +16,6 @@ const ( // for the bootstrap node is checked in to version control so we know it's // peer ID ahead of time. bootstrapAddr = "/ip4/127.0.0.1/tcp/60500/ws" - bootstrapPeerID = "16Uiu2HAmGd949LwaV4KNvK2WDSiMVy7xEmW983VH75CMmefmMpP7" bootstrapList = "/ip4/127.0.0.1/tcp/60500/ws/ipfs/16Uiu2HAmGd949LwaV4KNvK2WDSiMVy7xEmW983VH75CMmefmMpP7" bootstrapDataDir = "./data/bootstrap-0" diff --git a/integration-tests/graphql_integration_test.go b/integration-tests/graphql_integration_test.go index 780c5d4d2..06c4eb053 100644 --- a/integration-tests/graphql_integration_test.go +++ b/integration-tests/graphql_integration_test.go @@ -6,7 +6,6 @@ import ( "bytes" "context" "encoding/json" - "fmt" "math/big" "sort" "sync" @@ -332,7 +331,7 @@ func sortOrdersByHashDesc(orders []*gqlclient.OrderWithMetadata) { } func buildAndStartGraphQLServer(t *testing.T, ctx context.Context, wg *sync.WaitGroup) (client *gqlclient.Client, peerID string) { - removeOldFiles(t, ctx) + removeOldFiles(t) buildStandaloneForTests(t, ctx) // Start a standalone node with a wait group that is completed when the goroutine completes. @@ -349,7 +348,7 @@ func buildAndStartGraphQLServer(t *testing.T, ctx context.Context, wg *sync.Wait PeerID string `json:"myPeerID"` } log, err := waitForLogSubstring(ctx, logMessages, "starting GraphQL server") - require.NoError(t, err, fmt.Sprintf("GraphQL server didn't start")) + require.NoError(t, err, "GraphQL server didn't start") err = json.Unmarshal([]byte(log), &jsonLog) require.NoError(t, err) diff --git a/integration-tests/utils.go b/integration-tests/utils.go index b3153f790..10af731b8 100644 --- a/integration-tests/utils.go +++ b/integration-tests/utils.go @@ -14,10 +14,8 @@ import ( "sync" "testing" - "github.com/0xProject/0x-mesh/common/types" "github.com/0xProject/0x-mesh/constants" "github.com/0xProject/0x-mesh/ethereum" - "github.com/0xProject/0x-mesh/zeroex" "github.com/chromedp/cdproto/runtime" "github.com/chromedp/chromedp" ethrpc "github.com/ethereum/go-ethereum/rpc" @@ -52,17 +50,9 @@ func init() { } } -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func removeOldFiles(t *testing.T, ctx context.Context) { +func removeOldFiles(t *testing.T) { require.NoError(t, os.RemoveAll(filepath.Join(browserIntegrationTestDataDir, "sqlite-db"))) require.NoError(t, os.RemoveAll(filepath.Join(browserIntegrationTestDataDir, "p2p"))) - require.NoError(t, os.RemoveAll(filepath.Join(bootstrapDataDir, "p2p"))) } @@ -285,28 +275,6 @@ func waitForReceivedOrderLog(ctx context.Context, logMessages <-chan string, exp }) } -// Ensure that all of the orders in given list of signed orders are included in a list of order info. The list -// of order info can contain more orders than the first list and still pass this assertion. 
-func assertSignedOrdersMatch(t *testing.T, expectedSignedOrders []*zeroex.SignedOrder, actualOrderInfo []*types.OrderInfo) { - for _, expectedOrder := range expectedSignedOrders { - foundMatchingOrder := false - - expectedOrderHash, err := expectedOrder.ComputeOrderHash() - require.NoError(t, err) - for _, orderInfo := range actualOrderInfo { - if orderInfo.OrderHash.Hex() == expectedOrderHash.Hex() { - foundMatchingOrder = true - expectedOrder.ResetHash() - assert.Equal(t, expectedOrder, orderInfo.SignedOrder, "signedOrder did not match") - assert.Equal(t, expectedOrder.TakerAssetAmount, orderInfo.FillableTakerAssetAmount, "fillableTakerAssetAmount did not match") - break - } - } - - assert.True(t, foundMatchingOrder, "found no matching entry in the getOrdersResponse") - } -} - // A holder type for parsing logged OrderEvents. These are received by either // a GraphQL subscription or in the TypeScript bindings and are not usually logged // by Mesh. They need to be explicitly logged. diff --git a/loghooks/key_suffix_hook.go b/loghooks/key_suffix_hook.go index 9e8fcf1f2..188ffe330 100644 --- a/loghooks/key_suffix_hook.go +++ b/loghooks/key_suffix_hook.go @@ -56,6 +56,8 @@ func (h *KeySuffixHook) Fire(entry *log.Entry) error { return nil } +const stringType = "string" + // getTypeForValue returns a string representation of the type of the given val. func getTypeForValue(val interface{}) (string, error) { if val == nil { @@ -77,12 +79,12 @@ func getTypeForValue(val interface{}) (string, error) { if _, ok := val.(encoding.TextMarshaler); ok { // The json package always encodes values that implement // encoding.TextMarshaler as a string. - return "string", nil + return stringType, nil } if _, ok := val.(error); ok { // The json package always encodes values that implement // error as a string. 
- return "string", nil + return stringType, nil } underlyingType := getUnderlyingType(reflect.TypeOf(val)) @@ -99,7 +101,7 @@ func getTypeForValue(val interface{}) (string, error) { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64: return "number", nil case reflect.String, reflect.Complex64, reflect.Complex128, reflect.Func, reflect.Chan: - return "string", nil + return stringType, nil case reflect.Array, reflect.Slice: return "array", nil case reflect.Map: diff --git a/loghooks/key_suffix_hook_test.go b/loghooks/key_suffix_hook_test.go index e44b45964..41bf68adf 100644 --- a/loghooks/key_suffix_hook_test.go +++ b/loghooks/key_suffix_hook_test.go @@ -14,8 +14,6 @@ import ( ) type myStruct struct { - myInt int - myString string } func TestGetTypeForValue(t *testing.T) { diff --git a/orderfilter/filter.go b/orderfilter/filter.go index 47c388e65..7b2d802f8 100644 --- a/orderfilter/filter.go +++ b/orderfilter/filter.go @@ -54,6 +54,9 @@ func New(chainID int, customOrderSchema string, contractAddresses ethereum.Contr } messageLoader, err := newLoader(chainID, customOrderSchema, contractAddresses) + if err != nil { + return nil, err + } if err := messageLoader.AddSchemas(rootOrderSchemaLoader); err != nil { return nil, err } @@ -70,7 +73,7 @@ func New(chainID int, customOrderSchema string, contractAddresses ethereum.Contr }, nil } -func loadExchangeAddress(loader *jsonschema.SchemaLoader, chainID int, contractAddresses ethereum.ContractAddresses) error { +func loadExchangeAddress(loader *jsonschema.SchemaLoader, contractAddresses ethereum.ContractAddresses) error { // Note that exchangeAddressSchema accepts both checksummed and // non-checksummed (i.e. all lowercase) addresses. 
exchangeAddressSchema := fmt.Sprintf(`{"enum":[%q,%q]}`, contractAddresses.Exchange.Hex(), strings.ToLower(contractAddresses.Exchange.Hex())) @@ -87,7 +90,7 @@ func newLoader(chainID int, customOrderSchema string, contractAddresses ethereum if err := loadChainID(loader, chainID); err != nil { return nil, err } - if err := loadExchangeAddress(loader, chainID, contractAddresses); err != nil { + if err := loadExchangeAddress(loader, contractAddresses); err != nil { return nil, err } if err := loader.AddSchemas(builtInSchemas...); err != nil { diff --git a/orderfilter/filter_test.go b/orderfilter/filter_test.go index 5b67d15b0..75451ec20 100644 --- a/orderfilter/filter_test.go +++ b/orderfilter/filter_test.go @@ -223,7 +223,7 @@ func testFilterValidateOrderJSON(t *testing.T, generateFilter func(int, string, orderJSON: standardValidOrderJSON, }, { - note: "order with mispelled makerAddress", + note: "order with misspelled makerAddress", chainID: constants.TestChainID, customOrderSchema: DefaultCustomOrderSchema, orderJSON: []byte(`{"makerAdddress":"0xa3ece5d5b6319fa785efc10d3112769a46c6e149","takerAddress":"0x0000000000000000000000000000000000000000","makerAssetAmount":"100000000000000000000","takerAssetAmount":"100000000000000000000000","expirationTimeSeconds":"1559856615025","makerFee":"0","takerFee":"0","feeRecipientAddress":"0x0000000000000000000000000000000000000000","senderAddress":"0x0000000000000000000000000000000000000000","salt":"46108882540880341679561755865076495033942060608820537332859096815711589201849","makerAssetData":"0xf47261b0000000000000000000000000e41d2489571d322189246dafa5ebde1f4699f498","takerAssetData":"0xf47261b0000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2","makerFeeAssetData":"0x","takerFeeAssetData":"0x","exchangeAddress":"0x48bacb9266a570d521063ef5dd96e61686dbe788","chainId":1337,"signature":"0x1c52f75daa4bd2ad9e6e8a7c35adbd089d709e48ae86463f2abfafa3578747fafc264a04d02fa26227e90476d57bca94e24af32f1cc8da444bba21092ca56cd85603"}`), diff --git a/orderfilter/shared.go b/orderfilter/shared.go index 5330f4a5f..91c0601a7 100644 --- a/orderfilter/shared.go +++ b/orderfilter/shared.go @@ -159,6 +159,9 @@ func (f *Filter) MarshalJSON() ([]byte, error) { func (f *Filter) UnmarshalJSON(data []byte) error { j := jsonMarshallerForFilter{} err := json.Unmarshal(data, &j) + if err != nil { + return err + } filter, err := New(j.ChainID, j.CustomOrderSchema, ethereum.ContractAddresses{Exchange: j.ExchangeAddress}) if err != nil { return err diff --git a/p2p/banner/banner.go b/p2p/banner/banner.go index 7e4dd229a..cd3988363 100644 --- a/p2p/banner/banner.go +++ b/p2p/banner/banner.go @@ -54,7 +54,7 @@ func New(ctx context.Context, config Config) *Banner { violations: newViolationsTracker(ctx), } if config.LogBandwidthUsageStats { - go banner.continuouslyLogBandwidthUsage(ctx) + banner.continuouslyLogBandwidthUsage(ctx) } return banner } @@ -129,36 +129,39 @@ func (banner *Banner) unbanIPNet(ipNet net.IPNet) { } func (banner *Banner) continuouslyLogBandwidthUsage(ctx context.Context) { - logTicker := time.Tick(logBandwidthUsageInterval) - for { - select { - case <-ctx.Done(): - return - case <-logTicker: - // Log the bandwidth used by each peer. 
- for _, remotePeerID := range banner.config.Host.Network().Peers() { - stats := banner.config.BandwidthCounter.GetBandwidthForPeer(remotePeerID) - log.WithFields(log.Fields{ - "remotePeerID": remotePeerID.String(), - "bytesPerSecondIn": stats.RateIn, - "totalBytesIn": stats.TotalIn, - "bytesPerSecondOut": stats.RateOut, - "totalBytesOut": stats.TotalOut, - }).Debug("bandwidth used by peer") - } + go func() { + logTicker := time.NewTicker(logBandwidthUsageInterval) + for { + select { + case <-ctx.Done(): + logTicker.Stop() + return + case <-logTicker.C: + // Log the bandwidth used by each peer. + for _, remotePeerID := range banner.config.Host.Network().Peers() { + stats := banner.config.BandwidthCounter.GetBandwidthForPeer(remotePeerID) + log.WithFields(log.Fields{ + "remotePeerID": remotePeerID.String(), + "bytesPerSecondIn": stats.RateIn, + "totalBytesIn": stats.TotalIn, + "bytesPerSecondOut": stats.RateOut, + "totalBytesOut": stats.TotalOut, + }).Debug("bandwidth used by peer") + } - // Log the bandwidth used by each protocol. - for protocolID, stats := range banner.config.BandwidthCounter.GetBandwidthByProtocol() { - log.WithFields(log.Fields{ - "protocolID": protocolID, - "bytesPerSecondIn": stats.RateIn, - "totalBytesIn": stats.TotalIn, - "bytesPerSecondOut": stats.RateOut, - "totalBytesOut": stats.TotalOut, - }).Debug("bandwidth used by protocol") + // Log the bandwidth used by each protocol. + for protocolID, stats := range banner.config.BandwidthCounter.GetBandwidthByProtocol() { + log.WithFields(log.Fields{ + "protocolID": protocolID, + "bytesPerSecondIn": stats.RateIn, + "totalBytesIn": stats.TotalIn, + "bytesPerSecondOut": stats.RateOut, + "totalBytesOut": stats.TotalOut, + }).Debug("bandwidth used by protocol") + } } } - } + }() } // CheckBandwidthUsage checks the amount of data sent by each connected peer and diff --git a/p2p/banner/violations_tracker.go b/p2p/banner/violations_tracker.go index 1182467c2..f39a46d22 100644 --- a/p2p/banner/violations_tracker.go +++ b/p2p/banner/violations_tracker.go @@ -18,6 +18,8 @@ type violationsTracker struct { // BUG(albrow): newViolationsTracker currently leaks goroutines due to a // limitation of the caching library used under the hood. +//nolint:unparam // NOTE(jalextowle): Leave the function signature the same so +// that things don't need to be changed when the bug is fixed func newViolationsTracker(ctx context.Context) *violationsTracker { cache := ccache.New(ccache.Configure().MaxSize(violationsCacheSize).ItemsToPrune(violationsCacheSize / 10)) // TODO(albrow): We should be calling Stop to cleanup any goroutines diff --git a/p2p/node.go b/p2p/node.go index 4016ba6b8..376da5456 100644 --- a/p2p/node.go +++ b/p2p/node.go @@ -4,12 +4,9 @@ package p2p import ( "context" - "crypto/rand" "errors" "fmt" - "io/ioutil" mathrand "math/rand" - "path/filepath" "sync" "time" @@ -92,13 +89,14 @@ const ( // 0x Mesh network who is capable of sending, receiving, validating, and storing // messages. 
type Node struct { - ctx context.Context - config Config - messageHandler MessageHandler - host host.Host - connManager *connmgr.BasicConnMgr - dht *dht.IpfsDHT - routingDiscovery discovery.Discovery + ctx context.Context + config Config + messageHandler MessageHandler + host host.Host + connManager *connmgr.BasicConnMgr + dht *dht.IpfsDHT + // TODO(jalextowle): Make this linter compliant + routingDiscovery discovery.Discovery //nolint:staticcheck pubsub *pubsub.PubSub sub *pubsub.Subscription banner *banner.Banner @@ -170,14 +168,6 @@ type Config struct { MaxBytesPerSecond float64 } -func getPeerstoreDir(datadir string) string { - return filepath.Join(datadir, "peerstore") -} - -func getDHTDir(datadir string) string { - return filepath.Join(datadir, "dht") -} - // New creates a new Node with the given context and config. The Node will stop // all background operations if the context is canceled. func New(ctx context.Context, config Config) (*Node, error) { @@ -342,32 +332,6 @@ func registerValidators(ctx context.Context, basicHost host.Host, config Config, return nil } -func getPrivateKey(path string) (p2pcrypto.PrivKey, error) { - if path == "" { - // If path is empty, generate a new key. - priv, _, err := p2pcrypto.GenerateSecp256k1Key(rand.Reader) - if err != nil { - return nil, err - } - return priv, nil - } - - // Otherwise parse the key at the path given. - keyBytes, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - decodedKey, err := p2pcrypto.ConfigDecodeKey(string(keyBytes)) - if err != nil { - return nil, err - } - priv, err := p2pcrypto.UnmarshalPrivateKey(decodedKey) - if err != nil { - return nil, err - } - return priv, nil -} - // Multiaddrs returns all multi addresses at which the node is dialable. func (n *Node) Multiaddrs() []ma.Multiaddr { return n.host.Addrs() @@ -441,7 +405,8 @@ func (n *Node) Start() error { // Note(albrow): Advertise doesn't return an error, so we have no // choice but to assume it worked. for _, rendezvousPoint := range n.config.RendezvousPoints { - discovery.Advertise(n.ctx, n.routingDiscovery, rendezvousPoint, discovery.TTL(advertiseTTL)) + // TODO(jalextowle): Make this linter compliant + discovery.Advertise(n.ctx, n.routingDiscovery, rendezvousPoint, discovery.TTL(advertiseTTL)) //nolint:staticcheck } } }() @@ -617,7 +582,8 @@ func (n *Node) findNewPeers(ctx context.Context) error { }).Trace("looking for new peers") findPeersCtx, cancel := context.WithTimeout(ctx, defaultNetworkTimeout) defer cancel() - peerChan, err := n.routingDiscovery.FindPeers(findPeersCtx, rendezvousPoint, discovery.Limit(maxNewPeers)) + // TODO(jalextowle): Make this linter compliant + peerChan, err := n.routingDiscovery.FindPeers(findPeersCtx, rendezvousPoint, discovery.Limit(maxNewPeers)) //nolint:staticcheck if err != nil { return err } @@ -710,7 +676,9 @@ func (n *Node) Send(data []byte) error { // which is assigned to firstErr. 
var firstErr error for _, topic := range n.config.PublishTopics { - err := n.pubsub.Publish(topic, data) + // TODO(jalextowle): This should be replaced with `pubsub.Join` + // and `topic.Publish` + err := n.pubsub.Publish(topic, data) //nolint:staticcheck if err != nil && firstErr == nil { firstErr = err } @@ -723,7 +691,9 @@ func (n *Node) receive(ctx context.Context) (*Message, error) { if n.sub == nil { var err error - n.sub, err = n.pubsub.Subscribe(n.config.SubscribeTopic) + // TODO(jalextowle): This should be replaced with `pubsub.Join` + // and `topic.Subscribe` + n.sub, err = n.pubsub.Subscribe(n.config.SubscribeTopic) //nolint:staticcheck if err != nil { return nil, err } diff --git a/p2p/node_test.go b/p2p/node_test.go index 72f7e5b3d..24e6c221b 100644 --- a/p2p/node_test.go +++ b/p2p/node_test.go @@ -28,10 +28,7 @@ const ( ) var ( - // Counter used for config.RandSeed. Atomically incremented each time a new Node - // is created. - counter int64 = -1 - testRendezvousPoints = []string{"0x-mesh-testing-rendezvous"} + testRendezvousPoints = []string{"0x-mesh-testing-rendezvous"} ) // dummyMessageHandler satisfies the MessageHandler interface but considers all @@ -171,7 +168,7 @@ func (mh *inMemoryMessageHandler) store(messages []*Message) error { for _, msg := range messages { found := false for _, existing := range mh.messages { - if bytes.Compare(existing.Data, msg.Data) == 0 { + if bytes.Equal(existing.Data, msg.Data) { found = true break } diff --git a/p2p/ratevalidator/validator_test.go b/p2p/ratevalidator/validator_test.go index 55b556d60..1e2efc999 100644 --- a/p2p/ratevalidator/validator_test.go +++ b/p2p/ratevalidator/validator_test.go @@ -23,7 +23,7 @@ var peerIDs []peer.ID func init() { for _, peerIDString := range peerIDStrings { - peerID, _ := peer.IDB58Decode(peerIDString) + peerID, _ := peer.Decode(peerIDString) peerIDs = append(peerIDs, peerID) } } diff --git a/packages/mesh-browser/go/conversion-test/conversion_test.go b/packages/mesh-browser/go/conversion-test/conversion_test.go index 6621be402..a3ae2ae3d 100644 --- a/packages/mesh-browser/go/conversion-test/conversion_test.go +++ b/packages/mesh-browser/go/conversion-test/conversion_test.go @@ -23,7 +23,7 @@ var testCases []string var browserConversionTestsEnabled bool // The test `TestBrowserConversions` has a non-standard timeout, so it needs to be -// run seperately from other go tests. +// run separately from other go tests. func init() { flag.BoolVar(&browserConversionTestsEnabled, "enable-browser-conversion-tests", false, "enable browser conversion tests") testing.Init() @@ -92,16 +92,14 @@ func TestBrowserConversions(t *testing.T) { }() go func() { - select { - case <-done: - // NOTE(jalextowle): It is somewhat useful to know whether or not - // there are test results that were logged in the typescript but were - // not registered in this test file. For these purposes, we wait for - // last logs to appear before closing the test. Logs that are logged - // after the sleeping period will still be ignored. - time.Sleep(2 * time.Second) - cancel() - } + <-done + // NOTE(jalextowle): It is somewhat useful to know whether or not + // there are test results that were logged in the TypeScript but were + // not registered in this test file. For these purposes, we wait for + // the last logs to appear before closing the test. Logs that are logged + // after the sleeping period will still be ignored.
+ time.Sleep(2 * time.Second) + cancel() }() wg.Wait() @@ -473,22 +471,24 @@ func startBrowserInstance(t *testing.T, ctx context.Context, url string, done ch } } +const meshRootDirectory = "../../../../" + func buildForTests(t *testing.T, ctx context.Context) { fmt.Println("Clear yarn cache...") cmd := exec.CommandContext(ctx, "yarn", "cache", "clean") - cmd.Dir = "../../../../" + cmd.Dir = meshRootDirectory output, err := cmd.CombinedOutput() require.NoError(t, err, "could not clean yarn cache: %s", string(output)) fmt.Println("Installing dependencies for Wasm binary and Typescript bindings...") cmd = exec.CommandContext(ctx, "yarn", "install") - cmd.Dir = "../../../../" + cmd.Dir = meshRootDirectory output, err = cmd.CombinedOutput() - require.NoError(t, err, "could not install depedencies for TypeScript bindings: %s", string(output)) + require.NoError(t, err, "could not install dependencies for TypeScript bindings: %s", string(output)) fmt.Println("Building Wasm binary and Typescript bindings...") cmd = exec.CommandContext(ctx, "yarn", "build") - cmd.Dir = "../../../../" + cmd.Dir = meshRootDirectory output, err = cmd.CombinedOutput() require.NoError(t, err, "could not build Wasm binary and Typescript bindings: %s", string(output)) fmt.Println("Finished building for tests") diff --git a/scenario/scenario.go b/scenario/scenario.go index 679250cc7..5c85d02aa 100644 --- a/scenario/scenario.go +++ b/scenario/scenario.go @@ -96,8 +96,8 @@ func NewSignedTestOrder(t *testing.T, opts ...orderopts.Option) *zeroex.SignedOr return signedOrder } -// NewSignedTestOrdersBatch effeciently creates numOrders orders with independent options. -// If the options require setting up maker or taker state, that state will be set up effeciently +// NewSignedTestOrdersBatch efficiently creates numOrders orders with independent options. +// If the options require setting up maker or taker state, that state will be set up efficiently // with one transaction per address. 
// // optionsForIndex is a function which returns the options for creating the order at a specific diff --git a/zeroex/asset_data_decoder.go b/zeroex/asset_data_decoder.go index 269613507..f2b891156 100644 --- a/zeroex/asset_data_decoder.go +++ b/zeroex/asset_data_decoder.go @@ -132,35 +132,35 @@ func NewAssetDataDecoder() *AssetDataDecoder { log.WithField("erc20BridgeAssetDataABI", erc20BridgeAssetDataAbi).Panic("erc20BridgeAssetDataABI should be ABI parsable") } idToAssetDataInfo := map[string]assetDataInfo{ - ERC20AssetDataID: assetDataInfo{ + ERC20AssetDataID: { name: "ERC20Token", abi: erc20AssetDataABI, }, - ERC721AssetDataID: assetDataInfo{ + ERC721AssetDataID: { name: "ERC721Token", abi: erc721AssetDataABI, }, - ERC1155AssetDataID: assetDataInfo{ + ERC1155AssetDataID: { name: "ERC1155Assets", abi: erc1155AssetDataABI, }, - StaticCallAssetDataID: assetDataInfo{ + StaticCallAssetDataID: { name: "StaticCall", abi: staticCallAssetDataABI, }, - CheckGasPriceDefaultID: assetDataInfo{ + CheckGasPriceDefaultID: { name: "checkGasPrice", abi: checkGasPriceDefaultStaticCallDataABI, }, - CheckGasPriceID: assetDataInfo{ + CheckGasPriceID: { name: "checkGasPrice", abi: checkGasPriceStaticCallDataABI, }, - MultiAssetDataID: assetDataInfo{ + MultiAssetDataID: { name: "MultiAsset", abi: multiAssetDataABI, }, - ERC20BridgeAssetDataID: assetDataInfo{ + ERC20BridgeAssetDataID: { name: "ERC20Bridge", abi: erc20BridgeAssetDataABI, }, @@ -180,7 +180,7 @@ func (a *AssetDataDecoder) GetName(assetData []byte) (string, error) { idHex := common.Bytes2Hex(id) info, ok := a.idToAssetDataInfo[idHex] if !ok { - return "", errors.New(fmt.Sprintf("Unrecognized assetData with prefix: %s", idHex)) + return "", fmt.Errorf("Unrecognized assetData with prefix: %s", idHex) } return info.name, nil } @@ -194,7 +194,7 @@ func (a *AssetDataDecoder) Decode(assetData []byte, decodedAssetData interface{} idHex := common.Bytes2Hex(id) info, ok := a.idToAssetDataInfo[idHex] if !ok { - return errors.New(fmt.Sprintf("Unrecognized assetData with prefix: %s", idHex)) + return fmt.Errorf("Unrecognized assetData with prefix: %s", idHex) } // This is necessary to prevent a nil pointer exception for ABIs with no inputs diff --git a/zeroex/order.go b/zeroex/order.go index 3f6f8de4b..de8764cc8 100644 --- a/zeroex/order.go +++ b/zeroex/order.go @@ -603,8 +603,6 @@ func (s SignedOrder) MarshalJSON() ([]byte, error) { return signedOrderBytes, err } -const addressHexLength = 42 - // UnmarshalJSON implements a custom JSON unmarshaller for the SignedOrder type func (s *SignedOrder) UnmarshalJSON(data []byte) error { var signedOrderJSON SignedOrderJSON diff --git a/zeroex/ordervalidator/order_validator.go b/zeroex/ordervalidator/order_validator.go index 0474e5b54..cfc0805f1 100644 --- a/zeroex/ordervalidator/order_validator.go +++ b/zeroex/ordervalidator/order_validator.go @@ -275,6 +275,16 @@ func (o *OrderValidator) BatchValidate(ctx context.Context, signedOrders []*zero for _, signedOrders := range signedOrderChunks { wg.Add(1) go func(signedOrders []*zeroex.SignedOrder) { + // FIXME - Is this needed? 
+ // trimmedOrders := []wrappers.LibOrderOrder{} + // for _, signedOrder := range signedOrders { + // trimmedOrders = append(trimmedOrders, signedOrder.Trim()) + // } + // signatures := [][]byte{} + // for _, signedOrder := range signedOrders { + // signatures = append(signatures, signedOrder.Signature) + // } + defer wg.Done() select { @@ -390,7 +400,7 @@ func (o *OrderValidator) BatchOffchainValidation(signedOrders []*zeroex.SignedOr } } - isSupportedSignature := isSupportedSignature(signedOrder.Signature, orderHash) + isSupportedSignature := isSupportedSignature(signedOrder.Signature) if !isSupportedSignature { rejectedOrderInfos = append(rejectedOrderInfos, &RejectedOrderInfo{ OrderHash: orderHash, @@ -686,7 +696,7 @@ func (o *OrderValidator) computeOptimalChunkSizes(signedOrders []*zeroex.SignedO return chunkSizes } -func isSupportedSignature(signature []byte, orderHash common.Hash) bool { +func isSupportedSignature(signature []byte) bool { if len(signature) == 0 { return false } diff --git a/zeroex/ordervalidator/order_validator_test.go b/zeroex/ordervalidator/order_validator_test.go index ea2bb93d1..755b64514 100644 --- a/zeroex/ordervalidator/order_validator_test.go +++ b/zeroex/ordervalidator/order_validator_test.go @@ -42,10 +42,7 @@ const areNewOrders = false const emptyGetOrderRelevantStatesCallDataStringLength = 268 const ( - maxEthRPCRequestsPer24HrUTC = 1000000 - maxEthRPCRequestsPerSeconds = 1000.0 - defaultCheckpointInterval = 1 * time.Minute - defaultEthRPCTimeout = 5 * time.Second + defaultEthRPCTimeout = 5 * time.Second ) var ( @@ -104,41 +101,41 @@ func init() { func TestBatchValidateOffChainCases(t *testing.T) { var testCases = []testCase{ - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.MakerAssetAmount(big.NewInt(0))), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidMakerAssetAmount, }, - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.TakerAssetAmount(big.NewInt(0))), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidTakerAssetAmount, }, - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.MakerAssetData(multiAssetAssetData)), IsValid: true, }, - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.MakerAssetData(malformedAssetData)), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidMakerAssetData, }, - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.TakerAssetData(malformedAssetData)), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidTakerAssetData, }, - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.MakerAssetData(unsupportedAssetData)), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidMakerAssetData, }, - testCase{ + { SignedOrder: scenario.NewSignedTestOrder(t, orderopts.TakerAssetData(unsupportedAssetData)), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidTakerAssetData, }, - testCase{ + { SignedOrder: signedOrderWithCustomSignature(t, malformedSignature), IsValid: false, ExpectedRejectedOrderStatus: ROInvalidSignature, @@ -456,7 +453,3 @@ func signedOrderWithCustomSignature(t *testing.T, signature []byte) *zeroex.Sign signedOrder.Signature = signature return signedOrder } - -func copyOrder(order zeroex.Order) zeroex.Order { - return order -} diff --git a/zeroex/orderwatch/decoder/event_decoder.go b/zeroex/orderwatch/decoder/event_decoder.go index 6f1f86401..9c462fe07 100644 --- a/zeroex/orderwatch/decoder/event_decoder.go +++ b/zeroex/orderwatch/decoder/event_decoder.go @@ -871,14 +871,12 @@ func (d *Decoder) 
decodeERC721(log types.Log, decodedLog interface{}) error { } erc721Err := unpackLog(decodedLog, eventName, log, d.erc721ABI) - if erc721Err != nil { - if _, ok := erc721Err.(UnsupportedEventError); ok { - // Try unpacking using the incorrect ERC721 event ABIs - fallbackErr := unpackLog(decodedLog, eventName, log, d.erc721EventsAbiWithoutTokenIDIndex) - if fallbackErr != nil { - // We return the original attempt's error if the fallback fails - return erc721Err - } + if _, ok := erc721Err.(UnsupportedEventError); ok { + // Try unpacking using the incorrect ERC721 event ABIs + fallbackErr := unpackLog(decodedLog, eventName, log, d.erc721EventsAbiWithoutTokenIDIndex) + if fallbackErr != nil { + // We return the original attempt's error if the fallback fails + return erc721Err } } return nil diff --git a/zeroex/orderwatch/order_watcher.go b/zeroex/orderwatch/order_watcher.go index 359bafc97..f411bc7d2 100644 --- a/zeroex/orderwatch/order_watcher.go +++ b/zeroex/orderwatch/order_watcher.go @@ -55,9 +55,6 @@ const ( // corresponds to a block depth of ~25. permanentlyDeleteAfter = 5 * time.Minute - // defaultMaxOrders is the default max number of orders in storage. - defaultMaxOrders = 100000 - // maxBlockEventsToHandle is the max number of block events we want to // process in a single call to `handleBlockEvents` maxBlockEventsToHandle = 500 @@ -249,7 +246,7 @@ func (w *Watcher) cleanupLoop(ctx context.Context) error { } func (w *Watcher) removedCheckerLoop(ctx context.Context) error { - if err := w.permanentlyDeleteStaleRemovedOrders(ctx); err != nil { + if err := w.permanentlyDeleteStaleRemovedOrders(); err != nil { return err } lastDeleted := time.Now() @@ -266,7 +263,7 @@ func (w *Watcher) removedCheckerLoop(ctx context.Context) error { databaseUtilization := float64(count) / float64(w.maxOrders) if time.Since(lastDeleted) > maxDeleteInterval || databaseUtilization > databaseUtilizationThreshold { - if err := w.permanentlyDeleteStaleRemovedOrders(ctx); err != nil { + if err := w.permanentlyDeleteStaleRemovedOrders(); err != nil { return err } lastDeleted = time.Now() @@ -464,55 +461,6 @@ func (w *Watcher) handleBlockEvents(ctx context.Context, events []*blockwatch.Ev return nil } -// processRecentlyValidatedOrders creates a mapping from revalidation blocks to -// orders in the list of recently validated orders and returns this mapping along -// with the oldest revalidation block number. As it processes these orders, it -// ignores any recently validated orders that are up-to-date with respect to contract -// events in the orderwatcher and cannot possibly be missing relevant contract events. -// Any orders that were validated in a block that is no longer stored in the database -// must be revalidated in case the orderwatcher missed a contract event that is no -// longer stored. -// -// NOTE(jalextowle): Provided that a recently validated order was not validated more -// than 128 blocks ago, we could recover these block events to verify if it needs to -// be revalidated. We choose not to currently because revalidation will likely be more -// performant, but this may not always be the case. 
-func processRecentlyValidatedOrders( - recentlyValidatedOrders []*types.OrderWithMetadata, - orderHashToDBOrder map[common.Hash]*types.OrderWithMetadata, - orderHashToEvents map[common.Hash][]*zeroex.ContractEvent, - oldestBlockNumberFromEvents *big.Int, - oldestBlockNumberInDB *big.Int, -) ( - revalidationBlockToOrder map[*big.Int][]*types.OrderWithMetadata, - oldestRevalidationBlockNumber *big.Int, -) { - for _, recentlyValidatedOrder := range recentlyValidatedOrders { - previousValidationBlockNumber := recentlyValidatedOrder.LastValidatedBlockNumber - // If the oldestBlock in the list of block events is greater then - // the last validated block of the recently validated orders, we - // may be missing block events for this order. - if oldestRevalidationBlockNumber == nil || previousValidationBlockNumber.Cmp(oldestRevalidationBlockNumber) == -1 { - oldestRevalidationBlockNumber = previousValidationBlockNumber - } - if oldestBlockNumberFromEvents.Cmp(previousValidationBlockNumber) == -1 { - continue - } - // If the previous validation block of the order is a predecessor - // of the oldest block in the blockwatcher, we must revalidate the - // order because we may be missing relevant block events. - if oldestBlockNumberInDB.Cmp(previousValidationBlockNumber) == 1 { - orderHashToDBOrder[recentlyValidatedOrder.Hash] = recentlyValidatedOrder - orderHashToEvents[recentlyValidatedOrder.Hash] = []*zeroex.ContractEvent{} - } - revalidationBlockToOrder[previousValidationBlockNumber] = append( - revalidationBlockToOrder[previousValidationBlockNumber], - recentlyValidatedOrder, - ) - } - return revalidationBlockToOrder, oldestRevalidationBlockNumber -} - -// RevalidateOrdersForMissingEvents checks all of the orders in the database for // any events in the miniheaders table that may have been missed. This should only // be used on startup, as there is a different mechanism that serves this purpose @@ -521,7 +469,7 @@ func processRecentlyValidatedOrders( // NOTE(jalextowle): This function can miss block events if the blockwatcher was // behind by more than db.MaxMiniHeaders when `handleBlockEvents` was last called. // This is extremely unlikely, so we have decided not to implement more costly -// mechanisms to prevent from this possibility from occuring. +// mechanisms to prevent this possibility from occurring.
func (w *Watcher) RevalidateOrdersForMissingEvents(ctx context.Context) error { miniHeaders, err := w.db.FindMiniHeaders(nil) if err != nil { @@ -618,13 +566,13 @@ func (w *Watcher) findOrdersByEventWithLastValidatedBlockNumber( func (w *Watcher) findOrdersAffectedByContractEvents(log ethtypes.Log, filter db.OrderFilter) (*zeroex.ContractEvent, []*types.OrderWithMetadata, error) { eventType, err := w.eventDecoder.FindEventType(log) if err != nil { - switch err.(type) { + switch err := err.(type) { case decoder.UntrackedTokenError: return nil, nil, nil case decoder.UnsupportedEventError: logger.WithFields(logger.Fields{ - "topics": err.(decoder.UnsupportedEventError).Topics, - "contractAddress": err.(decoder.UnsupportedEventError).ContractAddress, + "topics": err.Topics, + "contractAddress": err.ContractAddress, }).Info("unsupported event found while trying to find its event type") return nil, nil, nil default: @@ -978,7 +926,7 @@ func (w *Watcher) Cleanup(ctx context.Context, lastUpdatedBuffer time.Duration) return nil } -func (w *Watcher) permanentlyDeleteStaleRemovedOrders(ctx context.Context) error { +func (w *Watcher) permanentlyDeleteStaleRemovedOrders() error { // TODO(albrow): This could be optimized by using a single query to delete // stale orders instead of finding them and deleting one-by-one. Limited by // the fact that we need to update in-memory state. When we remove in-memory @@ -1837,10 +1785,6 @@ func (w *Watcher) unwatchOrder(order *types.OrderWithMetadata, newFillableAmount } } -type orderDeleter interface { - Delete(id []byte) error -} - func (w *Watcher) permanentlyDeleteOrder(order *types.OrderWithMetadata) error { if err := w.db.DeleteOrder(order.Hash); err != nil { return err @@ -2035,8 +1979,3 @@ func (w *Watcher) WaitForAtLeastOneBlockToBeProcessed(ctx context.Context) error return errors.New("timed out waiting for first block to be processed by Mesh node. Check your backing Ethereum RPC endpoint") } } - -type logWithType struct { - Type string - Log ethtypes.Log -} diff --git a/zeroex/orderwatch/order_watcher_test.go b/zeroex/orderwatch/order_watcher_test.go index 0a157dbe7..4c95a35f8 100644 --- a/zeroex/orderwatch/order_watcher_test.go +++ b/zeroex/orderwatch/order_watcher_test.go @@ -37,11 +37,8 @@ import ( const ( blockRetentionLimit = 20 ethereumRPCRequestTimeout = 30 * time.Second - miniHeaderRetentionLimit = 2 blockPollingInterval = 1 * time.Second ethereumRPCMaxContentLength = 524288 - maxEthRPCRequestsPer24HrUTC = 1000000 - maxEthRPCRequestsPerSeconds = 1000.0 // processBlockSleepTime is the amount of time to wait for order watcher to // process block events.
If possible, we should listen for order events instead @@ -197,7 +194,7 @@ func TestOrderWatcherStoresValidOrders(t *testing.T) { orderopts.SetupMakerState(true), orderopts.MakerAssetData(scenario.ZRXAssetData), ) - setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + setupOrderWatcherScenario(ctx, t, database, signedOrder) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) @@ -236,7 +233,7 @@ func TestOrderWatcherUnfundedInsufficientERC20Balance(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Transfer makerAsset out of maker address opts := &bind.TransactOpts{ @@ -294,7 +291,7 @@ func TestOrderWatcherUnfundedInsufficientERC20BalanceForMakerFee(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Transfer makerAsset out of maker address opts := &bind.TransactOpts{ @@ -350,7 +347,7 @@ func TestOrderWatcherUnfundedInsufficientERC721Balance(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Transfer makerAsset out of maker address opts := &bind.TransactOpts{ @@ -407,7 +404,7 @@ func TestOrderWatcherUnfundedInsufficientERC721Allowance(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Remove Maker's NFT approval to ERC721Proxy. We do this by setting the // operator/spender to the null address. 
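The repeated edit running through these order_watcher_test.go hunks is an unused-parameter cleanup: setupOrderWatcherScenario never used its ethClient argument, so it is dropped from the signature and from every call site (the isSupportedSignature change earlier in this patch has the same shape). A minimal, self-contained sketch of the pattern, with hypothetical names standing in for the real helpers:

package main

import "fmt"

// fakeDB is a hypothetical stand-in for the db.DB handle the real helper uses.
type fakeDB struct{ name string }

// Before, the helper accepted a client it never touched, forcing every caller
// to construct and thread one through (hypothetical signature):
//
//	func setupScenario(client *rpcClient, db *fakeDB, orderHash string) string

// After, the dead parameter is gone and the signature names only the
// dependencies the function actually uses.
func setupScenario(db *fakeDB, orderHash string) string {
	return fmt.Sprintf("watching order %s in %s", orderHash, db.name)
}

func main() {
	db := &fakeDB{name: "meshdb"}
	// Each call site shrinks by one argument, exactly as in the hunks above.
	fmt.Println(setupScenario(db, "0xdeadbeef"))
}

The payoff is entirely at the call sites: no behavior depends on the removed argument, so the surrounding test hunks change only mechanically.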
@@ -463,7 +460,7 @@ func TestOrderWatcherUnfundedInsufficientERC1155Allowance(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Remove Maker's ERC1155 approval to ERC1155Proxy opts := &bind.TransactOpts{ @@ -520,7 +517,7 @@ func TestOrderWatcherUnfundedInsufficientERC1155Balance(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Reduce Maker's ERC1155 balance opts := &bind.TransactOpts{ @@ -573,7 +570,7 @@ func TestOrderWatcherUnfundedInsufficientERC20Allowance(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Remove Maker's ZRX approval to ERC20Proxy opts := &bind.TransactOpts{ @@ -627,7 +624,7 @@ func TestOrderWatcherUnfundedThenFundedAgain(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Transfer makerAsset out of maker address opts := &bind.TransactOpts{ @@ -715,7 +712,7 @@ func TestOrderWatcherNoChange(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, _ := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, _ := setupOrderWatcherScenario(ctx, t, database, signedOrder) latestStoredBlock, err := database.GetLatestMiniHeader() require.NoError(t, err) @@ -785,7 +782,7 @@ func TestOrderWatcherWETHWithdrawAndDeposit(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Withdraw maker's WETH (i.e. 
decrease WETH balance) // HACK(fabio): For some reason the txn fails with "out of gas" error with the @@ -872,7 +869,7 @@ func TestOrderWatcherCanceled(t *testing.T) { signedOrder := scenario.NewSignedTestOrder(t, orderopts.SetupMakerState(true)) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Cancel order opts := &bind.TransactOpts{ @@ -923,7 +920,7 @@ func TestOrderWatcherCancelUpTo(t *testing.T) { signedOrder := scenario.NewSignedTestOrder(t, orderopts.SetupMakerState(true)) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Cancel order with epoch opts := &bind.TransactOpts{ @@ -978,7 +975,7 @@ func TestOrderWatcherERC20Filled(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Fill order opts := &bind.TransactOpts{ @@ -1034,7 +1031,7 @@ func TestOrderWatcherERC20PartiallyFilled(t *testing.T) { ) expectedOrderHash, err := signedOrder.ComputeOrderHash() require.NoError(t, err) - blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, ethClient, database, signedOrder) + blockWatcher, orderEventsChan := setupOrderWatcherScenario(ctx, t, database, signedOrder) // Partially fill order opts := &bind.TransactOpts{ @@ -1386,7 +1383,7 @@ func TestOrderWatcherCleanup(t *testing.T) { require.NoError(t, err) select { - case _ = <-orderEventsChan: + case <-orderEventsChan: t.Error("Expected no orderEvents to fire after calling Cleanup()") case <-time.After(100 * time.Millisecond): // Noop @@ -1979,7 +1976,7 @@ func TestMissingOrderEventsWithMissingBlocks(t *testing.T) { assert.Equal(t, orderHash, orderEvents[0].OrderHash) } -func setupOrderWatcherScenario(ctx context.Context, t *testing.T, ethClient *ethclient.Client, database *db.DB, signedOrder *zeroex.SignedOrder) (*blockwatch.Watcher, chan []*zeroex.OrderEvent) { +func setupOrderWatcherScenario(ctx context.Context, t *testing.T, database *db.DB, signedOrder *zeroex.SignedOrder) (*blockwatch.Watcher, chan []*zeroex.OrderEvent) { blockWatcher, orderWatcher := setupOrderWatcher(ctx, t, ethRPCClient, database) // Start watching an order @@ -2090,7 +2087,7 @@ func setupSubTest(t *testing.T) func(t *testing.T) { } } -func waitForOrderEvents(t *testing.T, orderEventsChan <-chan []*zeroex.OrderEvent, expectedNumberOfEvents int, timeout time.Duration) []*zeroex.OrderEvent { +func waitForOrderEvents(t *testing.T, orderEventsChan <-chan []*zeroex.OrderEvent, expectedNumberOfEvents int, waitTimeForOrderEvents time.Duration) []*zeroex.OrderEvent { allOrderEvents := []*zeroex.OrderEvent{} for { select { @@ -2100,7 +2097,7 @@ func waitForOrderEvents(t *testing.T, orderEventsChan <-chan []*zeroex.OrderEven return allOrderEvents } continue - case <-time.After(timeout): + case <-time.After(waitTimeForOrderEvents): t.Fatalf("timed out waiting for %d order events (received %d events)", expectedNumberOfEvents, len(allOrderEvents)) } }
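Two other idioms in this patch generalize beyond the files they touch: fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) construction removed from asset_data_decoder.go, and binding the subject of a type switch (switch err := err.(type)) removes the repeated err.(decoder.UnsupportedEventError) assertions from findOrdersAffectedByContractEvents. A self-contained sketch of both, using a hypothetical error type in place of the decoder's:

package main

import "fmt"

// unsupportedEventError is a hypothetical stand-in for
// decoder.UnsupportedEventError.
type unsupportedEventError struct {
	Topics          []string
	ContractAddress string
}

func (e unsupportedEventError) Error() string {
	return fmt.Sprintf("unsupported event at contract %s", e.ContractAddress)
}

func describe(err error) string {
	// Binding the switch subject gives each case a value of the concrete
	// type, so the case body can use err.Topics directly instead of
	// repeating the assertion err.(unsupportedEventError).Topics.
	switch err := err.(type) {
	case unsupportedEventError:
		return fmt.Sprintf("unsupported event with %d topics", len(err.Topics))
	default:
		return "other error: " + err.Error()
	}
}

func main() {
	// fmt.Errorf formats and constructs the error in one step, replacing
	// the two-step errors.New(fmt.Sprintf(...)) pattern.
	err := fmt.Errorf("unrecognized assetData with prefix: %s", "f47261b0")
	fmt.Println(describe(err)) // falls through to the default case
	fmt.Println(describe(unsupportedEventError{
		Topics:          []string{"Transfer"},
		ContractAddress: "0x0",
	})) // hits the typed case without a separate assertion
}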