diff --git a/node/cmd/guardiand/node.go b/node/cmd/guardiand/node.go index 4ac066e989..95cefaeb9b 100644 --- a/node/cmd/guardiand/node.go +++ b/node/cmd/guardiand/node.go @@ -974,21 +974,21 @@ func runNode(cmd *cobra.Command, args []string) { signedQueryReqReadC, signedQueryReqWriteC := makeChannelPair[*gossipv1.SignedQueryRequest](common.SignedQueryRequestChannelSize) // Per-chain query requests - chainQueryReqC := make(map[vaa.ChainID]chan *common.QueryRequest) + chainQueryReqC := make(map[vaa.ChainID]chan *common.PerChainQueryInternal) // Query responses from watchers to query handler aggregated across all chains - queryResponseReadC, queryResponseWriteC := makeChannelPair[*common.QueryResponse](0) + queryResponseReadC, queryResponseWriteC := makeChannelPair[*common.PerChainQueryResponseInternal](0) // Query responses from query handler to p2p queryResponsePublicationReadC, queryResponsePublicationWriteC := makeChannelPair[*common.QueryResponsePublication](0) // Per-chain query response channel - chainQueryResponseC := make(map[vaa.ChainID]chan *common.QueryResponse) + chainQueryResponseC := make(map[vaa.ChainID]chan *common.PerChainQueryResponseInternal) // aggregate per-chain msgC into msgC. // SECURITY defense-in-depth: This way we enforce that a watcher must set the msg.EmitterChain to its chainId, which makes the code easier to audit for _, chainId := range vaa.GetAllNetworkIDs() { - chainQueryResponseC[chainId] = make(chan *common.QueryResponse) - go func(c <-chan *common.QueryResponse, chainId vaa.ChainID) { + chainQueryResponseC[chainId] = make(chan *common.PerChainQueryResponseInternal) + go func(c <-chan *common.PerChainQueryResponseInternal, chainId vaa.ChainID) { for { select { case <-rootCtx.Done(): @@ -1210,7 +1210,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Ethereum watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDEthereum) chainObsvReqC[vaa.ChainIDEthereum] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDEthereum] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDEthereum] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) ethWatcher = evm.NewEthWatcher(*ethRPC, ethContractAddr, "eth", vaa.ChainIDEthereum, chainMsgC[vaa.ChainIDEthereum], setWriteC, chainObsvReqC[vaa.ChainIDEthereum], chainQueryReqC[vaa.ChainIDEthereum], chainQueryResponseC[vaa.ChainIDEthereum], *unsafeDevMode) if err := supervisor.Run(ctx, "ethwatch", common.WrapWithScissors(ethWatcher.Run, "ethwatch")); err != nil { @@ -1222,7 +1222,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting BSC watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDBSC) chainObsvReqC[vaa.ChainIDBSC] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDBSC] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDBSC] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) bscWatcher := evm.NewEthWatcher(*bscRPC, bscContractAddr, "bsc", vaa.ChainIDBSC, chainMsgC[vaa.ChainIDBSC], nil, chainObsvReqC[vaa.ChainIDBSC], chainQueryReqC[vaa.ChainIDBSC], chainQueryResponseC[vaa.ChainIDBSC], *unsafeDevMode) bscWatcher.SetWaitForConfirmations(true) if err := supervisor.Run(ctx, "bscwatch", common.WrapWithScissors(bscWatcher.Run, "bscwatch")); err != nil { @@ -1239,7 +1239,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Polygon watcher") 
common.MustRegisterReadinessSyncing(vaa.ChainIDPolygon) chainObsvReqC[vaa.ChainIDPolygon] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDPolygon] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDPolygon] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) polygonWatcher := evm.NewEthWatcher(*polygonRPC, polygonContractAddr, "polygon", vaa.ChainIDPolygon, chainMsgC[vaa.ChainIDPolygon], nil, chainObsvReqC[vaa.ChainIDPolygon], chainQueryReqC[vaa.ChainIDPolygon], chainQueryResponseC[vaa.ChainIDPolygon], *unsafeDevMode) polygonWatcher.SetWaitForConfirmations(waitForConfirmations) if err := polygonWatcher.SetRootChainParams(*polygonRootChainRpc, *polygonRootChainContractAddress); err != nil { @@ -1253,7 +1253,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Avalanche watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDAvalanche) chainObsvReqC[vaa.ChainIDAvalanche] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDAvalanche] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDAvalanche] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "avalanchewatch", common.WrapWithScissors(evm.NewEthWatcher(*avalancheRPC, avalancheContractAddr, "avalanche", vaa.ChainIDAvalanche, chainMsgC[vaa.ChainIDAvalanche], nil, chainObsvReqC[vaa.ChainIDAvalanche], chainQueryReqC[vaa.ChainIDAvalanche], chainQueryResponseC[vaa.ChainIDAvalanche], *unsafeDevMode).Run, "avalanchewatch")); err != nil { return err @@ -1263,7 +1263,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Oasis watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDOasis) chainObsvReqC[vaa.ChainIDOasis] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDOasis] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDOasis] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "oasiswatch", common.WrapWithScissors(evm.NewEthWatcher(*oasisRPC, oasisContractAddr, "oasis", vaa.ChainIDOasis, chainMsgC[vaa.ChainIDOasis], nil, chainObsvReqC[vaa.ChainIDOasis], chainQueryReqC[vaa.ChainIDOasis], chainQueryResponseC[vaa.ChainIDOasis], *unsafeDevMode).Run, "oasiswatch")); err != nil { return err @@ -1273,7 +1273,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Aurora watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDAurora) chainObsvReqC[vaa.ChainIDAurora] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDAurora] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDAurora] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "aurorawatch", common.WrapWithScissors(evm.NewEthWatcher(*auroraRPC, auroraContractAddr, "aurora", vaa.ChainIDAurora, chainMsgC[vaa.ChainIDAurora], nil, chainObsvReqC[vaa.ChainIDAurora], chainQueryReqC[vaa.ChainIDAurora], chainQueryResponseC[vaa.ChainIDAurora], *unsafeDevMode).Run, "aurorawatch")); err != nil { return err @@ -1283,7 +1283,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Fantom watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDFantom) chainObsvReqC[vaa.ChainIDFantom] = make(chan *gossipv1.ObservationRequest, 
observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDFantom] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDFantom] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "fantomwatch", common.WrapWithScissors(evm.NewEthWatcher(*fantomRPC, fantomContractAddr, "fantom", vaa.ChainIDFantom, chainMsgC[vaa.ChainIDFantom], nil, chainObsvReqC[vaa.ChainIDFantom], chainQueryReqC[vaa.ChainIDFantom], chainQueryResponseC[vaa.ChainIDFantom], *unsafeDevMode).Run, "fantomwatch")); err != nil { return err @@ -1293,7 +1293,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Karura watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDKarura) chainObsvReqC[vaa.ChainIDKarura] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDKarura] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDKarura] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "karurawatch", common.WrapWithScissors(evm.NewEthWatcher(*karuraRPC, karuraContractAddr, "karura", vaa.ChainIDKarura, chainMsgC[vaa.ChainIDKarura], nil, chainObsvReqC[vaa.ChainIDKarura], chainQueryReqC[vaa.ChainIDKarura], chainQueryResponseC[vaa.ChainIDKarura], *unsafeDevMode).Run, "karurawatch")); err != nil { return err @@ -1303,7 +1303,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Acala watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDAcala) chainObsvReqC[vaa.ChainIDAcala] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDAcala] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDAcala] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "acalawatch", common.WrapWithScissors(evm.NewEthWatcher(*acalaRPC, acalaContractAddr, "acala", vaa.ChainIDAcala, chainMsgC[vaa.ChainIDAcala], nil, chainObsvReqC[vaa.ChainIDAcala], chainQueryReqC[vaa.ChainIDAcala], chainQueryResponseC[vaa.ChainIDAcala], *unsafeDevMode).Run, "acalawatch")); err != nil { return err @@ -1313,7 +1313,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Klaytn watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDKlaytn) chainObsvReqC[vaa.ChainIDKlaytn] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDKlaytn] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDKlaytn] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "klaytnwatch", common.WrapWithScissors(evm.NewEthWatcher(*klaytnRPC, klaytnContractAddr, "klaytn", vaa.ChainIDKlaytn, chainMsgC[vaa.ChainIDKlaytn], nil, chainObsvReqC[vaa.ChainIDKlaytn], chainQueryReqC[vaa.ChainIDKlaytn], chainQueryResponseC[vaa.ChainIDKlaytn], *unsafeDevMode).Run, "klaytnwatch")); err != nil { return err @@ -1323,7 +1323,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Celo watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDCelo) chainObsvReqC[vaa.ChainIDCelo] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDCelo] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDCelo] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "celowatch", 
common.WrapWithScissors(evm.NewEthWatcher(*celoRPC, celoContractAddr, "celo", vaa.ChainIDCelo, chainMsgC[vaa.ChainIDCelo], nil, chainObsvReqC[vaa.ChainIDCelo], chainQueryReqC[vaa.ChainIDCelo], chainQueryResponseC[vaa.ChainIDCelo], *unsafeDevMode).Run, "celowatch")); err != nil { return err @@ -1333,7 +1333,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Moonbeam watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDMoonbeam) chainObsvReqC[vaa.ChainIDMoonbeam] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDMoonbeam] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDMoonbeam] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) if err := supervisor.Run(ctx, "moonbeamwatch", common.WrapWithScissors(evm.NewEthWatcher(*moonbeamRPC, moonbeamContractAddr, "moonbeam", vaa.ChainIDMoonbeam, chainMsgC[vaa.ChainIDMoonbeam], nil, chainObsvReqC[vaa.ChainIDMoonbeam], chainQueryReqC[vaa.ChainIDMoonbeam], chainQueryResponseC[vaa.ChainIDMoonbeam], *unsafeDevMode).Run, "moonbeamwatch")); err != nil { return err @@ -1346,7 +1346,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Arbitrum watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDArbitrum) chainObsvReqC[vaa.ChainIDArbitrum] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDArbitrum] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDArbitrum] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) arbitrumWatcher := evm.NewEthWatcher(*arbitrumRPC, arbitrumContractAddr, "arbitrum", vaa.ChainIDArbitrum, chainMsgC[vaa.ChainIDArbitrum], nil, chainObsvReqC[vaa.ChainIDArbitrum], chainQueryReqC[vaa.ChainIDArbitrum], chainQueryResponseC[vaa.ChainIDArbitrum], *unsafeDevMode) arbitrumWatcher.SetL1Finalizer(ethWatcher) if err := supervisor.Run(ctx, "arbitrumwatch", common.WrapWithScissors(arbitrumWatcher.Run, "arbitrumwatch")); err != nil { @@ -1357,7 +1357,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Optimism watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDOptimism) chainObsvReqC[vaa.ChainIDOptimism] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDOptimism] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDOptimism] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) optimismWatcher := evm.NewEthWatcher(*optimismRPC, optimismContractAddr, "optimism", vaa.ChainIDOptimism, chainMsgC[vaa.ChainIDOptimism], nil, chainObsvReqC[vaa.ChainIDOptimism], chainQueryReqC[vaa.ChainIDOptimism], chainQueryResponseC[vaa.ChainIDOptimism], *unsafeDevMode) if err := supervisor.Run(ctx, "optimismwatch", common.WrapWithScissors(optimismWatcher.Run, "optimismwatch")); err != nil { @@ -1477,7 +1477,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Neon watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDNeon) chainObsvReqC[vaa.ChainIDNeon] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDNeon] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDNeon] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) neonWatcher := evm.NewEthWatcher(*neonRPC, neonContractAddr, "neon", vaa.ChainIDNeon, chainMsgC[vaa.ChainIDNeon], nil, chainObsvReqC[vaa.ChainIDNeon], 
chainQueryReqC[vaa.ChainIDNeon], chainQueryResponseC[vaa.ChainIDNeon], *unsafeDevMode) neonWatcher.SetL1Finalizer(solanaFinalizedWatcher) if err := supervisor.Run(ctx, "neonwatch", common.WrapWithScissors(neonWatcher.Run, "neonwatch")); err != nil { @@ -1488,7 +1488,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Base watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDBase) chainObsvReqC[vaa.ChainIDBase] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDBase] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDBase] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) baseWatcher := evm.NewEthWatcher(*baseRPC, baseContractAddr, "base", vaa.ChainIDBase, chainMsgC[vaa.ChainIDBase], nil, chainObsvReqC[vaa.ChainIDBase], chainQueryReqC[vaa.ChainIDBase], chainQueryResponseC[vaa.ChainIDBase], *unsafeDevMode) if err := supervisor.Run(ctx, "basewatch", common.WrapWithScissors(baseWatcher.Run, "basewatch")); err != nil { return err @@ -1501,7 +1501,7 @@ func runNode(cmd *cobra.Command, args []string) { logger.Info("Starting Sepolia watcher") common.MustRegisterReadinessSyncing(vaa.ChainIDSepolia) chainObsvReqC[vaa.ChainIDSepolia] = make(chan *gossipv1.ObservationRequest, observationRequestBufferSize) - chainQueryReqC[vaa.ChainIDSepolia] = make(chan *common.QueryRequest, queryRequestBufferSize) + chainQueryReqC[vaa.ChainIDSepolia] = make(chan *common.PerChainQueryInternal, queryRequestBufferSize) sepoliaWatcher := evm.NewEthWatcher(*sepoliaRPC, sepoliaContractAddr, "sepolia", vaa.ChainIDSepolia, chainMsgC[vaa.ChainIDSepolia], nil, chainObsvReqC[vaa.ChainIDSepolia], chainQueryReqC[vaa.ChainIDSepolia], chainQueryResponseC[vaa.ChainIDSepolia], *unsafeDevMode) if err := supervisor.Run(ctx, "sepoliawatch", common.WrapWithScissors(sepoliaWatcher.Run, "sepoliawatch")); err != nil { return err diff --git a/node/cmd/guardiand/query.go b/node/cmd/guardiand/query.go index 09e73da291..1fd86be287 100644 --- a/node/cmd/guardiand/query.go +++ b/node/cmd/guardiand/query.go @@ -2,6 +2,7 @@ package guardiand import ( "context" + "encoding/hex" "fmt" "strings" "time" @@ -28,15 +29,23 @@ const ( type ( // pendingQuery is the cache entry for a given query. pendingQuery struct { - req *common.QueryRequest - channel chan *common.QueryRequest - receiveTime time.Time - lastUpdateTime time.Time - inProgress bool + signedRequest *gossipv1.SignedQueryRequest + request *gossipv1.QueryRequest + requestID string + receiveTime time.Time + queries []*perChainQuery + responses []*common.PerChainQueryResponseInternal // respPub is only populated when we need to retry sending the response to p2p. respPub *common.QueryResponsePublication } + + // perChainQuery is the data associated with a single per chain query in a query request. 
+ perChainQuery struct { + req *common.PerChainQueryInternal + channel chan *common.PerChainQueryInternal + lastUpdateTime time.Time + } ) // handleQueryRequests multiplexes observation requests to the appropriate chain @@ -44,18 +53,55 @@ func handleQueryRequests( ctx context.Context, logger *zap.Logger, signedQueryReqC <-chan *gossipv1.SignedQueryRequest, - chainQueryReqC map[vaa.ChainID]chan *common.QueryRequest, + chainQueryReqC map[vaa.ChainID]chan *common.PerChainQueryInternal, + allowedRequestors map[ethCommon.Address]struct{}, + queryResponseReadC <-chan *common.PerChainQueryResponseInternal, + queryResponseWriteC chan<- *common.QueryResponsePublication, + env common.Environment, +) { + handleQueryRequestsImpl(ctx, logger, signedQueryReqC, chainQueryReqC, allowedRequestors, queryResponseReadC, queryResponseWriteC, env, requestTimeout, retryInterval) +} + +// handleQueryRequestsImpl allows instantiating the handler in the test environment with shorter timeout and retry parameters. +func handleQueryRequestsImpl( + ctx context.Context, + logger *zap.Logger, + signedQueryReqC <-chan *gossipv1.SignedQueryRequest, + chainQueryReqC map[vaa.ChainID]chan *common.PerChainQueryInternal, allowedRequestors map[ethCommon.Address]struct{}, - queryResponseReadC <-chan *common.QueryResponse, + queryResponseReadC <-chan *common.PerChainQueryResponseInternal, queryResponseWriteC chan<- *common.QueryResponsePublication, env common.Environment, + requestTimeoutImpl time.Duration, + retryIntervalImpl time.Duration, ) { qLogger := logger.With(zap.String("component", "ccqhandler")) qLogger.Info("cross chain queries are enabled", zap.Any("allowedRequestors", allowedRequestors), zap.String("env", string(env))) pendingQueries := make(map[string]*pendingQuery) // Key is requestID. - ticker := time.NewTicker(retryInterval) + // TODO: This should only include watchers that are actually running. Also need to test all these chains. + supportedChains := map[vaa.ChainID]struct{}{ + vaa.ChainIDEthereum: {}, + vaa.ChainIDBSC: {}, + vaa.ChainIDPolygon: {}, + vaa.ChainIDAvalanche: {}, + vaa.ChainIDOasis: {}, + vaa.ChainIDAurora: {}, + vaa.ChainIDFantom: {}, + vaa.ChainIDKarura: {}, + vaa.ChainIDAcala: {}, + vaa.ChainIDKlaytn: {}, + vaa.ChainIDCelo: {}, + vaa.ChainIDMoonbeam: {}, + vaa.ChainIDNeon: {}, + vaa.ChainIDArbitrum: {}, + vaa.ChainIDOptimism: {}, + vaa.ChainIDBase: {}, + vaa.ChainIDSepolia: {}, + } + + ticker := time.NewTicker(retryIntervalImpl) defer ticker.Stop() for { @@ -63,7 +109,7 @@ func handleQueryRequests( case <-ctx.Done(): return - case signedQueryRequest := <-signedQueryReqC: + case signedRequest := <-signedQueryReqC: // Inbound query request. 
// requestor validation happens here // request type validation is currently handled by the watcher // in the future, it may be worthwhile to catch certain types of @@ -73,107 +119,169 @@ func handleQueryRequests( // - length check on "to" address 20 bytes // - valid "block" strings - digest := common.QueryRequestDigest(env, signedQueryRequest.QueryRequest) + requestID := hex.EncodeToString(signedRequest.Signature) + digest := common.QueryRequestDigest(env, signedRequest.QueryRequest) - signerBytes, err := ethCrypto.Ecrecover(digest.Bytes(), signedQueryRequest.Signature) + signerBytes, err := ethCrypto.Ecrecover(digest.Bytes(), signedRequest.Signature) if err != nil { - qLogger.Error("failed to recover public key") + qLogger.Error("failed to recover public key", zap.String("requestID", requestID)) continue } signerAddress := ethCommon.BytesToAddress(ethCrypto.Keccak256(signerBytes[1:])[12:]) if _, exists := allowedRequestors[signerAddress]; !exists { - qLogger.Error("invalid requestor", zap.String("requestor", signerAddress.Hex())) + qLogger.Error("invalid requestor", zap.String("requestor", signerAddress.Hex()), zap.String("requestID", requestID)) + continue + } + + // Make sure this is not a duplicate request. TODO: Should we do something smarter here than just dropping the duplicate? + if oldReq, exists := pendingQueries[requestID]; exists { + qLogger.Warn("dropping duplicate query request", zap.String("requestID", requestID), zap.Stringer("origRecvTime", oldReq.receiveTime)) continue } - var qr gossipv1.QueryRequest - err = proto.Unmarshal(signedQueryRequest.QueryRequest, &qr) + var queryRequest gossipv1.QueryRequest + err = proto.Unmarshal(signedRequest.QueryRequest, &queryRequest) if err != nil { - qLogger.Error("failed to unmarshal query request", zap.String("requestor", signerAddress.Hex()), zap.Error(err)) + qLogger.Error("failed to unmarshal query request", zap.String("requestor", signerAddress.Hex()), zap.String("requestID", requestID), zap.Error(err)) continue } - if err := common.ValidateQueryRequest(&qr); err != nil { - qLogger.Error("received invalid message", zap.String("requestor", signerAddress.Hex()), zap.Error(err)) + if err := common.ValidateQueryRequest(&queryRequest); err != nil { + qLogger.Error("received invalid message", zap.String("requestor", signerAddress.Hex()), zap.String("requestID", requestID), zap.Error(err)) continue } - queryRequest := common.CreateQueryRequest(signedQueryRequest, &qr) + // Build the set of per chain queries and placeholders for the per chain responses. + errorFound := false + queries := []*perChainQuery{} + responses := make([]*common.PerChainQueryResponseInternal, len(queryRequest.PerChainQueries)) + receiveTime := time.Now() + + for requestIdx, pcq := range queryRequest.PerChainQueries { + chainID := vaa.ChainID(pcq.ChainId) + if _, exists := supportedChains[chainID]; !exists { + qLogger.Error("chain does not support cross chain queries", zap.String("requestID", requestID), zap.Stringer("chainID", chainID)) + errorFound = true + break + } - // Look up the channel for this chain. 
- channel, channelExists := chainQueryReqC[queryRequest.ChainID] - if !channelExists { - qLogger.Error("unknown chain ID for query request, dropping it", zap.String("requestID", queryRequest.RequestID), zap.Stringer("chain_id", queryRequest.ChainID)) - continue + channel, channelExists := chainQueryReqC[chainID] + if !channelExists { + qLogger.Error("unknown chain ID for query request, dropping it", zap.String("requestID", requestID), zap.Stringer("chain_id", chainID)) + errorFound = true + break + } + + queries = append(queries, &perChainQuery{ + req: &common.PerChainQueryInternal{ + RequestID: requestID, + RequestIdx: requestIdx, + ChainID: chainID, + Request: pcq, + }, + channel: channel, + }) } - // Make sure this is not a duplicate request. TODO: Should we do something smarter here than just dropping the duplicate? - if oldReq, exists := pendingQueries[queryRequest.RequestID]; exists { - qLogger.Warn("dropping duplicate query request", zap.String("requestID", queryRequest.RequestID), zap.Stringer("origRecvTime", oldReq.receiveTime)) + if errorFound { continue } - // Add the query to our cache. + // Create the pending query and add it to the cache. pq := &pendingQuery{ - req: queryRequest, - channel: channel, - receiveTime: time.Now(), - inProgress: true, + signedRequest: signedRequest, + request: &queryRequest, + requestID: requestID, + receiveTime: receiveTime, + queries: queries, + responses: responses, } - pendingQueries[queryRequest.RequestID] = pq + pendingQueries[requestID] = pq - // Forward the request to the watcher. - ccqForwardToWatcher(qLogger, pq) + // Forward the requests to the watchers. + for _, pcq := range pq.queries { + pcq.ccqForwardToWatcher(qLogger, pq.receiveTime) + } - case resp := <-queryResponseReadC: + case resp := <-queryResponseReadC: // Response from a watcher. if resp.Status == common.QuerySuccess { - if resp.Result == nil { - qLogger.Error("received a successful query response with a nil result, dropping it!", zap.String("requestID", resp.RequestID)) + if len(resp.Results) == 0 { + qLogger.Error("received a successful query response with no results, dropping it!", zap.String("requestID", resp.RequestID)) continue } + pq, exists := pendingQueries[resp.RequestID] + if !exists { + qLogger.Warn("received a success response with no outstanding query, dropping it", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx)) + continue + } + + if resp.RequestIdx >= len(pq.responses) { + qLogger.Error("received a response with an invalid index", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx)) + continue + } + + // Store the result, which will mark this per-chain query as completed. + pq.responses[resp.RequestIdx] = resp + + // If we still have other outstanding per chain queries for this request, keep waiting. + numStillPending := pq.numPendingRequests() + if numStillPending > 0 { + qLogger.Info("received a per chain query response, still waiting for more", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx), zap.Int("numStillPending", numStillPending)) + continue + } else { + qLogger.Info("received final per chain query response, ready to publish", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx)) + } + + // Build the list of per chain response publications and the overall query response publication. 
+ responses := []common.PerChainQueryResponse{} + for requestIdx, resp := range pq.responses { + if resp == nil { + qLogger.Error("unexpected null response in pending query!", zap.String("requestID", pq.requestID), zap.Int("requestIdx", requestIdx)) + continue + } + + responses = append(responses, common.PerChainQueryResponse{ + ChainID: uint32(resp.ChainID), + Responses: resp.Results, + }) + } + respPub := &common.QueryResponsePublication{ - Request: resp.SignedRequest, - Response: *resp.Result, + Request: pq.signedRequest, + PerChainResponses: responses, } // Send the response to be published. select { case queryResponseWriteC <- respPub: - qLogger.Debug("forwarded query response to p2p", zap.String("requestID", resp.RequestID)) + qLogger.Info("forwarded query response to p2p", zap.String("requestID", resp.RequestID)) delete(pendingQueries, resp.RequestID) default: - if pq, exists := pendingQueries[resp.RequestID]; exists { - qLogger.Warn("failed to publish query response to p2p, will retry publishing next interval", zap.String("requestID", resp.RequestID)) - pq.respPub = respPub - pq.inProgress = false - } else { - qLogger.Warn("failed to publish query response to p2p, request is no longer in cache, dropping it", zap.String("requestID", resp.RequestID)) - delete(pendingQueries, resp.RequestID) - } + qLogger.Warn("failed to publish query response to p2p, will retry publishing next interval", zap.String("requestID", resp.RequestID)) + pq.respPub = respPub } } else if resp.Status == common.QueryRetryNeeded { - if pq, exists := pendingQueries[resp.RequestID]; exists { - qLogger.Warn("query failed, will retry next interval", zap.String("requestID", resp.RequestID)) - pq.inProgress = false + if _, exists := pendingQueries[resp.RequestID]; exists { + qLogger.Warn("query failed, will retry next interval", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx)) } else { - qLogger.Warn("query failed, request is no longer in cache, dropping it", zap.String("requestID", resp.RequestID)) + qLogger.Warn("received a retry needed response with no outstanding query, dropping it", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx)) } } else if resp.Status == common.QueryFatalError { - qLogger.Error("query encountered a fatal error, dropping it", zap.String("requestID", resp.RequestID)) + qLogger.Warn("received a fatal error response, dropping the whole request", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx)) delete(pendingQueries, resp.RequestID) } else { - qLogger.Error("received an unexpected query status, dropping it", zap.String("requestID", resp.RequestID), zap.Int("status", int(resp.Status))) + qLogger.Warn("received an unexpected query status, dropping the whole request", zap.String("requestID", resp.RequestID), zap.Int("requestIdx", resp.RequestIdx), zap.Int("status", int(resp.Status))) delete(pendingQueries, resp.RequestID) } - case <-ticker.C: + case <-ticker.C: // Retry audit timer.
now := time.Now() for reqId, pq := range pendingQueries { - timeout := pq.receiveTime.Add(requestTimeout) - qLogger.Debug("audit", zap.String("requestId", reqId), zap.Stringer("receiveTime", pq.receiveTime), zap.Stringer("retryTime", pq.lastUpdateTime.Add(retryInterval)), zap.Stringer("timeout", timeout)) + timeout := pq.receiveTime.Add(requestTimeoutImpl) + qLogger.Debug("audit", zap.String("requestId", reqId), zap.Stringer("receiveTime", pq.receiveTime), zap.Stringer("timeout", timeout)) if timeout.Before(now) { qLogger.Warn("query request timed out, dropping it", zap.String("requestId", reqId), zap.Stringer("receiveTime", pq.receiveTime)) delete(pendingQueries, reqId) @@ -187,10 +295,13 @@ func handleQueryRequests( default: qLogger.Warn("resend of query response to p2p failed again, will keep retrying", zap.String("requestID", reqId)) } - } else if !pq.inProgress && pq.lastUpdateTime.Add(retryInterval).Before(now) { - qLogger.Info("retrying query request", zap.String("requestId", reqId), zap.Stringer("receiveTime", pq.receiveTime)) - pq.inProgress = true - ccqForwardToWatcher(qLogger, pq) + } else { + for requestIdx, pcq := range pq.queries { + if pq.responses[requestIdx] == nil && pcq.lastUpdateTime.Add(retryIntervalImpl).Before(now) { + qLogger.Info("retrying query request", zap.String("requestId", reqId), zap.Int("requestIdx", requestIdx), zap.Stringer("receiveTime", pq.receiveTime), zap.Stringer("lastUpdateTime", pcq.lastUpdateTime)) + pcq.ccqForwardToWatcher(qLogger, pq.receiveTime) + } + } } } } @@ -223,15 +334,26 @@ func ccqParseAllowedRequesters(ccqAllowedRequesters string) (map[ethCommon.Addre // ccqForwardToWatcher submits a query request to the appropriate watcher. It updates the request object if the write succeeds. // If the write fails, it does not update the last update time, which will cause a retry next interval (until it times out) -func ccqForwardToWatcher(qLogger *zap.Logger, pq *pendingQuery) { +func (pcq *perChainQuery) ccqForwardToWatcher(qLogger *zap.Logger, receiveTime time.Time) { select { // TODO: only send the query request itself and reassemble in this module - case pq.channel <- pq.req: - qLogger.Debug("forwarded query request to watcher", zap.String("requestID", pq.req.RequestID), zap.Stringer("chainID", pq.req.ChainID)) - pq.lastUpdateTime = pq.receiveTime + case pcq.channel <- pcq.req: + qLogger.Debug("forwarded query request to watcher", zap.String("requestID", pcq.req.RequestID), zap.Stringer("chainID", pcq.req.ChainID)) + pcq.lastUpdateTime = receiveTime default: - // By leaving lastUpdateTime unset and setting inProgress to false, we will retry next interval. - qLogger.Warn("failed to send query request to watcher, will retry next interval", zap.String("requestID", pq.req.RequestID), zap.Stringer("chain_id", pq.req.ChainID)) - pq.inProgress = false + // By leaving lastUpdateTime unset, we will retry next interval. + qLogger.Warn("failed to send query request to watcher, will retry next interval", zap.String("requestID", pcq.req.RequestID), zap.Stringer("chain_id", pcq.req.ChainID)) + } +} + +// numPendingRequests returns the number of per chain queries in a request that are still awaiting responses. Zero means the request can now be published. 
+func (pq *pendingQuery) numPendingRequests() int { + numPending := 0 + for _, resp := range pq.responses { + if resp == nil { + numPending += 1 + } } + + return numPending } diff --git a/node/cmd/guardiand/query_test.go b/node/cmd/guardiand/query_test.go new file mode 100644 index 0000000000..bd4d0cb8d3 --- /dev/null +++ b/node/cmd/guardiand/query_test.go @@ -0,0 +1,725 @@ +package guardiand + +import ( + "bytes" + "context" + "crypto/ecdsa" + "encoding/hex" + "fmt" + "math" + "math/big" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/certusone/wormhole/node/pkg/common" + gossipv1 "github.com/certusone/wormhole/node/pkg/proto/gossip/v1" + "github.com/wormhole-foundation/wormhole/sdk/vaa" + + ethCommon "github.com/ethereum/go-ethereum/common" + ethCrypto "github.com/ethereum/go-ethereum/crypto" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.uber.org/zap" + "google.golang.org/protobuf/proto" +) + +const ( + testSigner = "beFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe" + + // Magic retry values used to cause special behavior in the watchers. + fatalError = math.MaxInt + ignoreQuery = math.MaxInt - 1 + + // Speed things up for testing purposes. + requestTimeoutForTest = 100 * time.Millisecond + retryIntervalForTest = 10 * time.Millisecond + pollIntervalForTest = 5 * time.Millisecond +) + +var ( + nonce = uint32(0) + + watcherChainsForTest = []vaa.ChainID{vaa.ChainIDPolygon, vaa.ChainIDBSC} +) + +// createPerChainQueryForTesting creates a per chain query for use in tests. The To and Data fields are meaningless gibberish, not ABI. +func createPerChainQueryForTesting( + chainId vaa.ChainID, + block string, + numCalls int, +) *gossipv1.PerChainQueryRequest { + callData := []*gossipv1.EthCallQueryRequest_EthCallData{} + for count := 0; count < numCalls; count++ { + callData = append(callData, &gossipv1.EthCallQueryRequest_EthCallData{ + To: []byte(fmt.Sprintf("%-20s", fmt.Sprintf("To for %d:%d", chainId, count))), + Data: []byte(fmt.Sprintf("CallData for %d:%d", chainId, count)), + }) + } + + callRequest := &gossipv1.EthCallQueryRequest{ + Block: block, + CallData: callData, + } + + return &gossipv1.PerChainQueryRequest{ + ChainId: uint32(chainId), + Message: &gossipv1.PerChainQueryRequest_EthCallQueryRequest{ + EthCallQueryRequest: callRequest, + }, + } +} + +// createSignedQueryRequestForTesting creates a query request object and signs it using the specified key. +func createSignedQueryRequestForTesting( + sk *ecdsa.PrivateKey, + perChainQueries []*gossipv1.PerChainQueryRequest, +) (*gossipv1.SignedQueryRequest, *gossipv1.QueryRequest) { + nonce += 1 + queryRequest := &gossipv1.QueryRequest{ + Nonce: nonce, + PerChainQueries: perChainQueries, + } + + queryRequestBytes, err := proto.Marshal(queryRequest) + if err != nil { + panic(err) + } + + digest := common.QueryRequestDigest(common.UnsafeDevNet, queryRequestBytes) + sig, err := ethCrypto.Sign(digest.Bytes(), sk) + if err != nil { + panic(err) + } + + signedQueryRequest := &gossipv1.SignedQueryRequest{ + QueryRequest: queryRequestBytes, + Signature: sig, + } + + return signedQueryRequest, queryRequest +} + +// createExpectedResultsForTest generates an array of the results expected for a request. These results are returned by the watcher, and used to validate the response. 
+func createExpectedResultsForTest(perChainQueries []*gossipv1.PerChainQueryRequest) []common.PerChainQueryResponse { + expectedResults := []common.PerChainQueryResponse{} + for _, pcq := range perChainQueries { + switch req := pcq.Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + now := time.Now() + blockNum, err := strconv.ParseInt(strings.TrimPrefix(req.EthCallQueryRequest.Block, "0x"), 16, 64) + if err != nil { + panic("invalid blockNum!") + } + resp := []common.EthCallQueryResponse{} + for _, cd := range req.EthCallQueryRequest.CallData { + resp = append(resp, common.EthCallQueryResponse{ + Number: big.NewInt(blockNum), + Hash: ethCommon.HexToHash("0x9999bac44d09a7f69ee7941819b0a19c59ccb1969640cc513be09ef95ed2d8e2"), + Time: timeForTest(timeForTest(now)), + Result: []byte(hex.EncodeToString(cd.To) + ":" + hex.EncodeToString(cd.Data)), + }) + } + expectedResults = append(expectedResults, common.PerChainQueryResponse{ + ChainID: pcq.ChainId, + Responses: resp, + }) + + default: + panic("Invalid call data type!") + } + } + + return expectedResults +} + +// validateResponseForTest performs validation on the responses generated by these tests. Note that it is not a generalized validate function. +func validateResponseForTest( + t *testing.T, + response *common.QueryResponsePublication, + signedRequest *gossipv1.SignedQueryRequest, + queryRequest *gossipv1.QueryRequest, + expectedResults []common.PerChainQueryResponse, +) bool { + require.NotNil(t, response) + require.True(t, common.SignedQueryRequestEqual(signedRequest, response.Request)) + require.Equal(t, len(queryRequest.PerChainQueries), len(response.PerChainResponses)) + require.True(t, bytes.Equal(response.Request.Signature, signedRequest.Signature)) + require.Equal(t, len(response.PerChainResponses), len(expectedResults)) + for idx := range response.PerChainResponses { + require.True(t, response.PerChainResponses[idx].Equal(&expectedResults[idx])) + } + + return true +} + +// A timestamp has nanos, but we only marshal down to micros, so trim our time to micros for testing purposes. 
+func timeForTest(t time.Time) time.Time { + return time.UnixMicro(t.UnixMicro()) +} + +func TestCcqParseAllowedRequestersSuccess(t *testing.T) { + ccqAllowedRequestersList, err := ccqParseAllowedRequesters(testSigner) + require.NoError(t, err) + require.NotNil(t, ccqAllowedRequestersList) + require.Equal(t, 1, len(ccqAllowedRequestersList)) + + _, exists := ccqAllowedRequestersList[ethCommon.BytesToAddress(ethCommon.Hex2Bytes(testSigner))] + require.True(t, exists) + _, exists = ccqAllowedRequestersList[ethCommon.BytesToAddress(ethCommon.Hex2Bytes("beFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBf"))] + require.False(t, exists) + + ccqAllowedRequestersList, err = ccqParseAllowedRequesters("beFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe,beFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBf") + require.NoError(t, err) + require.NotNil(t, ccqAllowedRequestersList) + require.Equal(t, 2, len(ccqAllowedRequestersList)) + + _, exists = ccqAllowedRequestersList[ethCommon.BytesToAddress(ethCommon.Hex2Bytes(testSigner))] + require.True(t, exists) + _, exists = ccqAllowedRequestersList[ethCommon.BytesToAddress(ethCommon.Hex2Bytes("beFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBf"))] + require.True(t, exists) +} + +func TestCcqParseAllowedRequestersFailsIfParameterEmpty(t *testing.T) { + ccqAllowedRequestersList, err := ccqParseAllowedRequesters("") + require.Error(t, err) + require.Nil(t, ccqAllowedRequestersList) + + ccqAllowedRequestersList, err = ccqParseAllowedRequesters(",") + require.Error(t, err) + require.Nil(t, ccqAllowedRequestersList) +} + +func TestCcqParseAllowedRequestersFailsIfInvalidParameter(t *testing.T) { + ccqAllowedRequestersList, err := ccqParseAllowedRequesters("Hello") + require.Error(t, err) + require.Nil(t, ccqAllowedRequestersList) +} + +// mockData is the data structure used to mock up the query handler environment. +type mockData struct { + sk *ecdsa.PrivateKey + + signedQueryReqReadC <-chan *gossipv1.SignedQueryRequest + signedQueryReqWriteC chan<- *gossipv1.SignedQueryRequest + + chainQueryReqC map[vaa.ChainID]chan *common.PerChainQueryInternal + + queryResponseReadC <-chan *common.PerChainQueryResponseInternal + queryResponseWriteC chan<- *common.PerChainQueryResponseInternal + + queryResponsePublicationReadC <-chan *common.QueryResponsePublication + queryResponsePublicationWriteC chan<- *common.QueryResponsePublication + + mutex sync.Mutex + queryResponsePublication *common.QueryResponsePublication + expectedResults []common.PerChainQueryResponse + requestsPerChain map[vaa.ChainID]int + retriesPerChain map[vaa.ChainID]int +} + +// resetState() is used to reset mock data between queries in the same test. +func (md *mockData) resetState() { + md.mutex.Lock() + defer md.mutex.Unlock() + md.queryResponsePublication = nil + md.expectedResults = nil + md.requestsPerChain = make(map[vaa.ChainID]int) + md.retriesPerChain = make(map[vaa.ChainID]int) +} + +// setExpectedResults sets the results to be returned by the watchers. +func (md *mockData) setExpectedResults(expectedResults []common.PerChainQueryResponse) { + md.mutex.Lock() + defer md.mutex.Unlock() + md.expectedResults = expectedResults +} + +// setRetries allows a test to specify how many times a given watcher should retry before returning success. +// If the count is the special value `fatalError`, the watcher will return common.QueryFatalError. 
+func (md *mockData) setRetries(chainId vaa.ChainID, count int) { + md.mutex.Lock() + defer md.mutex.Unlock() + md.retriesPerChain[chainId] = count +} + +// incrementRequestsPerChainAlreadyLocked is used by the watchers to keep track of how many times they were invoked in a given test. +func (md *mockData) incrementRequestsPerChainAlreadyLocked(chainId vaa.ChainID) { + if val, exists := md.requestsPerChain[chainId]; exists { + md.requestsPerChain[chainId] = val + 1 + } else { + md.requestsPerChain[chainId] = 1 + } +} + +// getQueryResponsePublication returns the latest query response publication received by the mock. +func (md *mockData) getQueryResponsePublication() *common.QueryResponsePublication { + md.mutex.Lock() + defer md.mutex.Unlock() + return md.queryResponsePublication +} + +// getRequestsPerChain returns the count of the number of times the given watcher was invoked in a given test. +func (md *mockData) getRequestsPerChain(chainId vaa.ChainID) int { + md.mutex.Lock() + defer md.mutex.Unlock() + if ret, exists := md.requestsPerChain[chainId]; exists { + return ret + } + return 0 +} + +// shouldIgnoreAlreadyLocked is used by the watchers to see if they should ignore a query (causing a retry). +func (md *mockData) shouldIgnoreAlreadyLocked(chainId vaa.ChainID) bool { + if val, exists := md.retriesPerChain[chainId]; exists { + if val == ignoreQuery { + delete(md.retriesPerChain, chainId) + return true + } + } + return false +} + +// getStatusAlreadyLocked is used by the watchers to determine what query status they should return, based on the `retriesPerChain`. +func (md *mockData) getStatusAlreadyLocked(chainId vaa.ChainID) common.QueryStatus { + if val, exists := md.retriesPerChain[chainId]; exists { + if val == fatalError { + return common.QueryFatalError + } + val -= 1 + if val > 0 { + md.retriesPerChain[chainId] = val + } else { + delete(md.retriesPerChain, chainId) + } + return common.QueryRetryNeeded + } + return common.QuerySuccess +} + +// createQueryHandlerForTest creates the query handler mock environment, including the set of watchers and the response listener. +// Most tests will use this function to set up the mock. +func createQueryHandlerForTest(t *testing.T, ctx context.Context, logger *zap.Logger, chains []vaa.ChainID) *mockData { + md := createQueryHandlerForTestWithoutPublisher(t, ctx, logger, chains) + md.startResponseListener(ctx) + return md +} + +// createQueryHandlerForTestWithoutPublisher creates the query handler mock environment, including the set of watchers but not the response listener. +// This function can be invoked directly to test retries of response publication (by delaying the start of the response listener). 
+func createQueryHandlerForTestWithoutPublisher(t *testing.T, ctx context.Context, logger *zap.Logger, chains []vaa.ChainID) *mockData { + md := mockData{} + var err error + + *unsafeDevMode = true + md.sk, err = loadGuardianKey("../../hack/query/dev.guardian.key") + require.NoError(t, err) + require.NotNil(t, md.sk) + + ccqAllowedRequestersList, err := ccqParseAllowedRequesters(testSigner) + require.NoError(t, err) + + // Inbound observation requests from the p2p service (for all chains) + md.signedQueryReqReadC, md.signedQueryReqWriteC = makeChannelPair[*gossipv1.SignedQueryRequest](common.SignedQueryRequestChannelSize) + + // Per-chain query requests + md.chainQueryReqC = make(map[vaa.ChainID]chan *common.PerChainQueryInternal) + for _, chainId := range chains { + md.chainQueryReqC[chainId] = make(chan *common.PerChainQueryInternal) + } + + // Query responses from watchers to query handler aggregated across all chains + md.queryResponseReadC, md.queryResponseWriteC = makeChannelPair[*common.PerChainQueryResponseInternal](0) + + // Query responses from query handler to p2p + md.queryResponsePublicationReadC, md.queryResponsePublicationWriteC = makeChannelPair[*common.QueryResponsePublication](0) + + md.resetState() + + go handleQueryRequestsImpl(ctx, logger, md.signedQueryReqReadC, md.chainQueryReqC, ccqAllowedRequestersList, + md.queryResponseReadC, md.queryResponsePublicationWriteC, common.GoTest, requestTimeoutForTest, retryIntervalForTest) + + // Create a routine for each configured watcher. It will take a per chain query and return the corresponding expected result. + // It also pegs a counter of the number of requests the watcher received, for verification purposes. + for chainId := range md.chainQueryReqC { + go func(chainId vaa.ChainID, chainQueryReqC <-chan *common.PerChainQueryInternal) { + for { + select { + case <-ctx.Done(): + return + case pcqr := <-chainQueryReqC: + require.Equal(t, chainId, pcqr.ChainID) + md.mutex.Lock() + md.incrementRequestsPerChainAlreadyLocked(chainId) + if md.shouldIgnoreAlreadyLocked(chainId) { + logger.Info("watcher ignoring query", zap.String("chainId", chainId.String()), zap.Int("requestIdx", pcqr.RequestIdx)) + } else { + results := md.expectedResults[pcqr.RequestIdx].Responses + result := md.getStatusAlreadyLocked(chainId) + logger.Info("watcher returning", zap.String("chainId", chainId.String()), zap.Int("requestIdx", pcqr.RequestIdx), zap.Int("result", int(result))) + queryResponse := common.CreatePerChainQueryResponseInternal(pcqr.RequestID, pcqr.RequestIdx, pcqr.ChainID, result, results) + md.queryResponseWriteC <- queryResponse + } + md.mutex.Unlock() + } + } + }(chainId, md.chainQueryReqC[chainId]) + } + + return &md +} + +// startResponseListener starts the response listener routine. It is called as part of the standard mock environment set up. Or, it can be used +// along with `createQueryHandlerForTestWithoutPublisher“ to test retries of response publication (by delaying the start of the response listener). +func (md *mockData) startResponseListener(ctx context.Context) { + go func() { + for { + select { + case <-ctx.Done(): + return + case qrp := <-md.queryResponsePublicationReadC: + md.mutex.Lock() + md.queryResponsePublication = qrp + md.mutex.Unlock() + } + } + }() +} + +// waitForResponse is used by the tests to wait for a response publication. It will eventually timeout if the query fails. 
+func (md *mockData) waitForResponse() *common.QueryResponsePublication { + for count := 0; count < 50; count++ { + time.Sleep(pollIntervalForTest) + ret := md.getQueryResponsePublication() + if ret != nil { + return ret + } + } + return nil +} + +// TestInvalidQueries tests all the obvious reasons why a query may fail (aside from watcher failures). +func TestInvalidQueries(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + var perChainQueries []*gossipv1.PerChainQueryRequest + var signedQueryRequest *gossipv1.SignedQueryRequest + + // Query with a bad signature should fail. + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + signedQueryRequest.Signature[0] += 1 // Corrupt the signature. + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Query for an unsupported chain should fail. The supported chains are defined in supportedChains in query.go + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDAlgorand, "0x28d9630", 2)} + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Query with no per-chain queries should fail. + md.resetState() + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, []*gossipv1.PerChainQueryRequest{}) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Query for an invalid chain should fail. + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + perChainQueries[0].ChainId = uint32(math.MaxUint16) + 1 // Corrupt the chain ID. + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Query for a chain that supports queries but that is not in the watcher channel map should fail. + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDSepolia, "0x28d9630", 2)} + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Query for "latest" should fail. + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + switch req := perChainQueries[0].Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + req.EthCallQueryRequest.Block = "latest" + } + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // A per-chain query with no call data should fail. + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 0)} + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Wrong length "To" contract should fail. 
+ md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + switch req := perChainQueries[0].Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + req.EthCallQueryRequest.CallData[0].To = req.EthCallQueryRequest.CallData[0].To[2:] + } + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) + + // Invalid type of per-chain query should fail. + md.resetState() + perChainQueries = []*gossipv1.PerChainQueryRequest{{ChainId: uint32(vaa.ChainIDPolygon)}} + signedQueryRequest, _ = createSignedQueryRequestForTesting(md.sk, perChainQueries) + md.signedQueryReqWriteC <- signedQueryRequest + require.Nil(t, md.waitForResponse()) +} + +func TestSingleQueryShouldSucceed(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. + perChainQueries := []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // Wait until we receive a response or timeout. + queryResponsePublication := md.waitForResponse() + require.NotNil(t, queryResponsePublication) + + assert.Equal(t, 1, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.True(t, validateResponseForTest(t, queryResponsePublication, signedQueryRequest, queryRequest, expectedResults)) +} + +func TestBatchOfTwoQueriesShouldSucceed(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. + perChainQueries := []*gossipv1.PerChainQueryRequest{ + createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2), + createPerChainQueryForTesting(vaa.ChainIDBSC, "0x28d9123", 3), + } + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // Wait until we receive a response or timeout. + queryResponsePublication := md.waitForResponse() + require.NotNil(t, queryResponsePublication) + + assert.Equal(t, 1, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.Equal(t, 1, md.getRequestsPerChain(vaa.ChainIDBSC)) + assert.True(t, validateResponseForTest(t, queryResponsePublication, signedQueryRequest, queryRequest, expectedResults)) +} + +func TestQueryWithLimitedRetriesShouldSucceed(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. 
+ perChainQueries := []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Make it retry a couple of times, but not enough to make it fail. + retries := 2 + md.setRetries(vaa.ChainIDPolygon, retries) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // The request should eventually succeed. + queryResponsePublication := md.waitForResponse() + require.NotNil(t, queryResponsePublication) + + assert.Equal(t, retries+1, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.True(t, validateResponseForTest(t, queryResponsePublication, signedQueryRequest, queryRequest, expectedResults)) +} + +func TestQueryWithRetryDueToTimeoutShouldSucceed(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. + perChainQueries := []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Make the first per chain query timeout, but the retry should succeed. + md.setRetries(vaa.ChainIDPolygon, ignoreQuery) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // The request should eventually succeed. + queryResponsePublication := md.waitForResponse() + require.NotNil(t, queryResponsePublication) + + assert.Equal(t, 2, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.True(t, validateResponseForTest(t, queryResponsePublication, signedQueryRequest, queryRequest, expectedResults)) +} + +func TestQueryWithTooManyRetriesShouldFail(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. + perChainQueries := []*gossipv1.PerChainQueryRequest{ + createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2), + createPerChainQueryForTesting(vaa.ChainIDBSC, "0x28d9123", 3), + } + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Make polygon retry a couple of times, but not enough to make it fail. + retriesForPolygon := 2 + md.setRetries(vaa.ChainIDPolygon, retriesForPolygon) + + // Make BSC retry so many times that the request times out. + md.setRetries(vaa.ChainIDBSC, 1000) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // The request should timeout. 
+ queryResponsePublication := md.waitForResponse() + require.Nil(t, queryResponsePublication) + + assert.Equal(t, retriesForPolygon+1, md.getRequestsPerChain(vaa.ChainIDPolygon)) +} + +func TestQueryWithLimitedRetriesOnMultipleChainsShouldSucceed(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. + perChainQueries := []*gossipv1.PerChainQueryRequest{ + createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2), + createPerChainQueryForTesting(vaa.ChainIDBSC, "0x28d9123", 3), + } + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Make both chains retry a couple of times, but not enough to make it fail. + retriesForPolygon := 2 + md.setRetries(vaa.ChainIDPolygon, retriesForPolygon) + + retriesForBSC := 3 + md.setRetries(vaa.ChainIDBSC, retriesForBSC) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // The request should eventually succeed. + queryResponsePublication := md.waitForResponse() + require.NotNil(t, queryResponsePublication) + + assert.Equal(t, retriesForPolygon+1, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.Equal(t, retriesForBSC+1, md.getRequestsPerChain(vaa.ChainIDBSC)) + assert.True(t, validateResponseForTest(t, queryResponsePublication, signedQueryRequest, queryRequest, expectedResults)) +} + +func TestFatalErrorOnPerChainQueryShouldCauseRequestToFail(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTest(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. + perChainQueries := []*gossipv1.PerChainQueryRequest{ + createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2), + createPerChainQueryForTesting(vaa.ChainIDBSC, "0x28d9123", 3), + } + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Make BSC return a fatal error. + md.setRetries(vaa.ChainIDBSC, fatalError) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // The request should timeout. + queryResponsePublication := md.waitForResponse() + require.Nil(t, queryResponsePublication) + + assert.Equal(t, 1, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.Equal(t, 1, md.getRequestsPerChain(vaa.ChainIDBSC)) +} + +func TestPublishRetrySucceeds(t *testing.T) { + ctx := context.Background() + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + md := createQueryHandlerForTestWithoutPublisher(t, ctx, logger, watcherChainsForTest) + + // Create the request and the expected results. Give the expected results to the mock. 
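
(Editor's note, not part of the diff.) TestPublishRetrySucceeds, which this block belongs to, exercises the handler's behavior described by the log lines quoted just below: if the response cannot be handed to p2p right away, it keeps retrying each interval. The sketch below illustrates that pattern under assumptions; the names are placeholders, not the handler's actual ones.

package ccqsketch

import (
	"context"
	"time"

	"github.com/certusone/wormhole/node/pkg/common"
)

// publishWithRetry keeps trying to hand a response off for p2p publication,
// retrying once per interval until the send succeeds or the context is cancelled.
func publishWithRetry(ctx context.Context, resp *common.QueryResponsePublication, pubC chan<- *common.QueryResponsePublication, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		// Try a non-blocking hand-off first.
		select {
		case pubC <- resp:
			return // published
		default:
		}
		// Nobody was ready to receive; wait for the next tick (or cancellation) and try again.
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}
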
+ perChainQueries := []*gossipv1.PerChainQueryRequest{createPerChainQueryForTesting(vaa.ChainIDPolygon, "0x28d9630", 2)} + signedQueryRequest, queryRequest := createSignedQueryRequestForTesting(md.sk, perChainQueries) + expectedResults := createExpectedResultsForTest(queryRequest.PerChainQueries) + md.setExpectedResults(expectedResults) + + // Submit the query request to the handler. + md.signedQueryReqWriteC <- signedQueryRequest + + // Sleep for a bit before we start listening for published results. + // If you look in the log, you should see one of these: "failed to publish query response to p2p, will retry publishing next interval" + // and at least one of these: "resend of query response to p2p failed again, will keep retrying". + time.Sleep(retryIntervalForTest * 3) + + // Now start the publisher routine. + // If you look in the log, you should see one of these: "resend of query response to p2p succeeded". + md.startResponseListener(ctx) + + // The response should still get published. + queryResponsePublication := md.waitForResponse() + require.NotNil(t, queryResponsePublication) + + assert.Equal(t, 1, md.getRequestsPerChain(vaa.ChainIDPolygon)) + assert.True(t, validateResponseForTest(t, queryResponsePublication, signedQueryRequest, queryRequest, expectedResults)) +} diff --git a/node/hack/query/send_req.go b/node/hack/query/send_req.go index a2bd529fa7..d01bf24874 100644 --- a/node/hack/query/send_req.go +++ b/node/hack/query/send_req.go @@ -10,10 +10,12 @@ import ( "encoding/hex" "fmt" "io" + "math/big" "os" "strings" "time" + "github.com/certusone/wormhole/node/hack/query/utils" "github.com/certusone/wormhole/node/pkg/common" "github.com/certusone/wormhole/node/pkg/p2p" gossipv1 "github.com/certusone/wormhole/node/pkg/proto/gossip/v1" @@ -32,6 +34,7 @@ import ( libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic" "github.com/multiformats/go-multiaddr" + "github.com/tendermint/tendermint/libs/rand" "go.uber.org/zap" "golang.org/x/crypto/openpgp/armor" //nolint "google.golang.org/protobuf/proto" @@ -177,32 +180,129 @@ func main() { panic(err) } - // methodName := "totalSupply" - methodName := "name" - data, err := wethAbi.Pack(methodName) + methods := []string{"name", "totalSupply"} + callData := []*gossipv1.EthCallQueryRequest_EthCallData{} + to, _ := hex.DecodeString("0d500b1d8e8ef31e21c99d1db9a6444d3adf1270") + + for _, method := range methods { + data, err := wethAbi.Pack(method) + if err != nil { + panic(err) + } + + callData = append(callData, &gossipv1.EthCallQueryRequest_EthCallData{ + To: to, + Data: data, + }) + } + + // Fetch the latest block number + url := "https://rpc.ankr.com/polygon" + logger.Info("Querying for latest block height", zap.String("url", url)) + blockNum, err := utils.FetchLatestBlockNumberFromUrl(ctx, url) if err != nil { - panic(err) + logger.Fatal("Failed to fetch latest block number", zap.Error(err)) } - to, _ := hex.DecodeString("0d500b1d8e8ef31e21c99d1db9a6444d3adf1270") + logger.Info("latest block", zap.String("num", blockNum.String()), zap.String("encoded", hexutil.EncodeBig(blockNum))) + // block := "0x28d9630" - block := "latest" + // block := "latest" // block := "0x9999bac44d09a7f69ee7941819b0a19c59ccb1969640cc513be09ef95ed2d8e2" + + // Start of query creation... 
callRequest := &gossipv1.EthCallQueryRequest{ - To: to, - Data: data, - Block: block, + Block: hexutil.EncodeBig(blockNum), + CallData: callData, + } + + // Send 2 individual requests for the same thing but 5 blocks apart + // First request... + logger.Info("calling sendQueryAndGetRsp for ", zap.String("blockNum", blockNum.String())) + queryRequest := createQueryRequest(callRequest) + sendQueryAndGetRsp(queryRequest, sk, th, ctx, logger, sub, wethAbi, methods) + + // This is just so that when I look at the output, it is easier for me. (Paul) + logger.Info("sleeping for 5 seconds") + time.Sleep(time.Second * 5) + + // Second request... + blockNum = blockNum.Sub(blockNum, big.NewInt(5)) + callRequest2 := &gossipv1.EthCallQueryRequest{ + Block: hexutil.EncodeBig(blockNum), + CallData: callData, + } + queryRequest2 := createQueryRequest(callRequest2) + logger.Info("calling sendQueryAndGetRsp for ", zap.String("blockNum", blockNum.String())) + sendQueryAndGetRsp(queryRequest2, sk, th, ctx, logger, sub, wethAbi, methods) + + // Now, want to send a single query with multiple requests... + logger.Info("Starting multiquery test in 5...") + time.Sleep(time.Second * 5) + multiCallRequest := []*gossipv1.EthCallQueryRequest{callRequest, callRequest2} + multQueryRequest := createQueryRequestWithMultipleRequests(multiCallRequest) + sendQueryAndGetRsp(multQueryRequest, sk, th, ctx, logger, sub, wethAbi, methods) + + // Cleanly shutdown + // Without this the same host won't properly discover peers until some timeout + sub.Cancel() + if err := th.Close(); err != nil { + logger.Fatal("Error closing the topic", zap.Error(err)) } + if err := h.Close(); err != nil { + logger.Fatal("Error closing the host", zap.Error(err)) + } + + // + // END SHUTDOWN + // + + logger.Info("Success! All tests passed!") +} + +const ( + GuardianKeyArmoredBlock = "WORMHOLE GUARDIAN PRIVATE KEY" +) + +func createQueryRequest(callRequest *gossipv1.EthCallQueryRequest) *gossipv1.QueryRequest { queryRequest := &gossipv1.QueryRequest{ - ChainId: 5, - Nonce: 0, - Message: &gossipv1.QueryRequest_EthCallQueryRequest{ - EthCallQueryRequest: callRequest}} + Nonce: rand.Uint32(), + PerChainQueries: []*gossipv1.PerChainQueryRequest{ + { + ChainId: 5, + Message: &gossipv1.PerChainQueryRequest_EthCallQueryRequest{ + EthCallQueryRequest: callRequest, + }, + }, + }, + } + return queryRequest +} + +func createQueryRequestWithMultipleRequests(callRequests []*gossipv1.EthCallQueryRequest) *gossipv1.QueryRequest { + perChainQueries := []*gossipv1.PerChainQueryRequest{} + for _, req := range callRequests { + perChainQueries = append(perChainQueries, &gossipv1.PerChainQueryRequest{ + ChainId: 5, + Message: &gossipv1.PerChainQueryRequest_EthCallQueryRequest{ + EthCallQueryRequest: req, + }, + }) + } + + queryRequest := &gossipv1.QueryRequest{ + Nonce: rand.Uint32(), + PerChainQueries: perChainQueries, + } + return queryRequest +} +func sendQueryAndGetRsp(queryRequest *gossipv1.QueryRequest, sk *ecdsa.PrivateKey, th *pubsub.Topic, ctx context.Context, logger *zap.Logger, sub *pubsub.Subscription, wethAbi abi.ABI, methods []string) { queryRequestBytes, err := proto.Marshal(queryRequest) if err != nil { panic(err) } + numQueries := len(queryRequest.PerChainQueries) // Sign the query request using our private key. 
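
(Editor's note, not part of the diff.) The signing step that follows wraps the protobuf-serialized request and its signature into a SignedQueryRequest. A minimal sketch of that flow is below, assuming QueryRequestDigest returns a go-ethereum common.Hash (its use with .Bytes() in this file suggests so) and using go-ethereum's ECDSA signer; the package name ccqsketch is invented.

package ccqsketch

import (
	"crypto/ecdsa"

	"github.com/certusone/wormhole/node/pkg/common"
	gossipv1 "github.com/certusone/wormhole/node/pkg/proto/gossip/v1"
	ethCrypto "github.com/ethereum/go-ethereum/crypto"
	"google.golang.org/protobuf/proto"
)

func signQueryRequest(sk *ecdsa.PrivateKey, queryRequest *gossipv1.QueryRequest) (*gossipv1.SignedQueryRequest, error) {
	// Serialize the protobuf request; these bytes are what gets signed and gossiped.
	queryRequestBytes, err := proto.Marshal(queryRequest)
	if err != nil {
		return nil, err
	}
	// Prefix and hash the serialized request for the current environment, then sign the digest.
	digest := common.QueryRequestDigest(common.UnsafeDevNet, queryRequestBytes)
	sig, err := ethCrypto.Sign(digest.Bytes(), sk)
	if err != nil {
		return nil, err
	}
	return &gossipv1.SignedQueryRequest{
		QueryRequest: queryRequestBytes,
		Signature:    sig,
	}, nil
}
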
digest := common.QueryRequestDigest(common.UnsafeDevNet, queryRequestBytes) @@ -261,14 +361,32 @@ func main() { // TODO: verify response signature isMatchingResponse = true - result, err := wethAbi.Methods[methodName].Outputs.Unpack(response.Response.Result) - if err != nil { - logger.Warn("failed to unpack result", zap.Error(err)) + if len(response.PerChainResponses) != numQueries { + logger.Warn("unexpected number of per chain query responses", zap.Int("expectedNum", numQueries), zap.Int("actualNum", len(response.PerChainResponses))) break } - - resultStr := hexutil.Encode(response.Response.Result) - logger.Info("found matching response", zap.String("number", response.Response.Number.String()), zap.String("hash", response.Response.Hash.String()), zap.String("time", response.Response.Time.String()), zap.Any("resultDecoded", result), zap.String("resultStr", resultStr)) + // Do double loop over responses + for index, pcq := range response.PerChainResponses { + logger.Info("per chain query response index", zap.Int("index", index)) + + localCallData := queryRequest.PerChainQueries[index].GetEthCallQueryRequest().GetCallData() + + if len(pcq.Responses) != len(localCallData) { + logger.Warn("unexpected number of results", zap.Int("expectedNum", len(localCallData)), zap.Int("expectedNum", len(pcq.Responses))) + break + } + + for idx, resp := range pcq.Responses { + result, err := wethAbi.Methods[methods[idx]].Outputs.Unpack(resp.Result) + if err != nil { + logger.Warn("failed to unpack result", zap.Error(err)) + break + } + + resultStr := hexutil.Encode(resp.Result) + logger.Info("found matching response", zap.Int("idx", idx), zap.String("number", resp.Number.String()), zap.String("hash", resp.Hash.String()), zap.String("time", resp.Time.String()), zap.String("method", methods[idx]), zap.Any("resultDecoded", result), zap.String("resultStr", resultStr)) + } + } } default: continue @@ -277,32 +395,8 @@ func main() { break } } - - // - // BEGIN SHUTDOWN - // - - // Cleanly shutdown - // Without this the same host won't properly discover peers until some timeout - sub.Cancel() - if err := th.Close(); err != nil { - logger.Fatal("Error closing the topic", zap.Error(err)) - } - if err := h.Close(); err != nil { - logger.Fatal("Error closing the host", zap.Error(err)) - } - - // - // END SHUTDOWN - // - - logger.Info("Success! All tests passed!") } -const ( - GuardianKeyArmoredBlock = "WORMHOLE GUARDIAN PRIVATE KEY" -) - // loadGuardianKey loads a serialized guardian key from disk. 
func loadGuardianKey(filename string) (*ecdsa.PrivateKey, error) { f, err := os.Open(filename) diff --git a/node/hack/query/test/query_test.go b/node/hack/query/test/query_test.go index d0c04b1401..a8a0c7559f 100644 --- a/node/hack/query/test/query_test.go +++ b/node/hack/query/test/query_test.go @@ -184,16 +184,30 @@ func TestCrossChainQuery(t *testing.T) { panic(err) } to, _ := hex.DecodeString("DDb64fE46a91D46ee29420539FC25FD07c5FEa3E") // WETH + + callData := []*gossipv1.EthCallQueryRequest_EthCallData{ + { + To: to, + Data: data, + }, + } + callRequest := &gossipv1.EthCallQueryRequest{ - To: to, - Data: data, - Block: hexutil.EncodeBig(blockNum), + Block: hexutil.EncodeBig(blockNum), + CallData: callData, } + queryRequest := &gossipv1.QueryRequest{ - ChainId: 2, - Nonce: 0, - Message: &gossipv1.QueryRequest_EthCallQueryRequest{ - EthCallQueryRequest: callRequest}} + Nonce: 1, + PerChainQueries: []*gossipv1.PerChainQueryRequest{ + { + ChainId: 2, + Message: &gossipv1.PerChainQueryRequest_EthCallQueryRequest{ + EthCallQueryRequest: callRequest, + }, + }, + }, + } queryRequestBytes, err := proto.Marshal(queryRequest) if err != nil { @@ -278,13 +292,28 @@ func TestCrossChainQuery(t *testing.T) { continue } - result, err := wethAbi.Methods[methodName].Outputs.Unpack(response.Response.Result) - if err != nil { - logger.Fatal("failed to unpack result", zap.Error(err)) + if len(response.PerChainResponses) != 1 { + logger.Warn("unexpected number of per chain query responses", zap.Int("expectedNum", 1), zap.Int("actualNum", len(response.PerChainResponses))) + break + } + + pcq := response.PerChainResponses[0] + + if len(pcq.Responses) == 0 { + logger.Warn("response did not contain any results", zap.Error(err)) + break } - resultStr := hexutil.Encode(response.Response.Result) - logger.Info("found matching response", zap.String("number", response.Response.Number.String()), zap.String("hash", response.Response.Hash.String()), zap.String("time", response.Response.Time.String()), zap.Any("resultDecoded", result), zap.String("resultStr", resultStr)) + for idx, resp := range pcq.Responses { + result, err := wethAbi.Methods[methodName].Outputs.Unpack(resp.Result) + if err != nil { + logger.Warn("failed to unpack result", zap.Error(err)) + break + } + + resultStr := hexutil.Encode(resp.Result) + logger.Info("found matching response", zap.Int("idx", idx), zap.String("number", resp.Number.String()), zap.String("hash", resp.Hash.String()), zap.String("time", resp.Time.String()), zap.Any("resultDecoded", result), zap.String("resultStr", resultStr)) + } success = true } diff --git a/node/hack/query/utils/fetchCurrentGuardianSet.go b/node/hack/query/utils/fetchCurrentGuardianSet.go index 9e6e2152c0..67d40ee0f8 100644 --- a/node/hack/query/utils/fetchCurrentGuardianSet.go +++ b/node/hack/query/utils/fetchCurrentGuardianSet.go @@ -35,6 +35,10 @@ func FetchLatestBlockNumber(ctx context.Context, network common.Environment) (*b if rawUrl == "" { return nil, fmt.Errorf("unable to get rpc url") } + return FetchLatestBlockNumberFromUrl(ctx, rawUrl) +} + +func FetchLatestBlockNumberFromUrl(ctx context.Context, rawUrl string) (*big.Int, error) { rawClient, err := ethRpc.DialContext(ctx, rawUrl) if err != nil { return nil, fmt.Errorf("unable to dial eth context: %w", err) diff --git a/node/pkg/common/queryRequest.go b/node/pkg/common/queryRequest.go index f96be4cbb7..d3e0045458 100644 --- a/node/pkg/common/queryRequest.go +++ b/node/pkg/common/queryRequest.go @@ -1,7 +1,8 @@ package common import ( - "encoding/hex" + 
"bytes" + "encoding/binary" "fmt" "math" "strings" @@ -14,23 +15,14 @@ import ( ) const SignedQueryRequestChannelSize = 50 +const EvmContractAddressLength = 20 -// QueryRequest is an internal representation of a query request. -type QueryRequest struct { - SignedRequest *gossipv1.SignedQueryRequest - Request *gossipv1.QueryRequest - RequestID string - ChainID vaa.ChainID -} - -// CreateQueryRequest creates a QueryRequest object from the signed query request. -func CreateQueryRequest(signedRequest *gossipv1.SignedQueryRequest, request *gossipv1.QueryRequest) *QueryRequest { - return &QueryRequest{ - SignedRequest: signedRequest, - Request: request, - RequestID: hex.EncodeToString(signedRequest.Signature), - ChainID: vaa.ChainID(request.ChainId), - } +// PerChainQueryInternal is an internal representation of a query request that is passed to the watcher. +type PerChainQueryInternal struct { + RequestID string + RequestIdx int + ChainID vaa.ChainID + Request *gossipv1.PerChainQueryRequest } // QueryRequestDigest returns the query signing prefix based on the environment. @@ -58,28 +50,233 @@ func PostSignedQueryRequest(signedQueryReqSendC chan<- *gossipv1.SignedQueryRequ } } +// MarshalQueryRequest serializes the binary representation of a query request +func MarshalQueryRequest(queryRequest *gossipv1.QueryRequest) ([]byte, error) { + buf := new(bytes.Buffer) + + vaa.MustWrite(buf, binary.BigEndian, queryRequest.Nonce) // uint32 + + vaa.MustWrite(buf, binary.BigEndian, uint8(len(queryRequest.PerChainQueries))) + for _, perChainQuery := range queryRequest.PerChainQueries { + pcqBuf, err := MarshalPerChainQueryRequest(perChainQuery) + if err != nil { + return nil, fmt.Errorf("failed to marshal per chain query") + } + buf.Write(pcqBuf) + } + + return buf.Bytes(), nil +} + +// MarshalQueryRequest serializes the binary representation of a per chain query request +func MarshalPerChainQueryRequest(perChainQuery *gossipv1.PerChainQueryRequest) ([]byte, error) { + buf := new(bytes.Buffer) + switch req := perChainQuery.Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + vaa.MustWrite(buf, binary.BigEndian, QUERY_REQUEST_TYPE_ETH_CALL) + vaa.MustWrite(buf, binary.BigEndian, uint16(perChainQuery.ChainId)) + vaa.MustWrite(buf, binary.BigEndian, uint32(len(req.EthCallQueryRequest.Block))) + buf.Write([]byte(req.EthCallQueryRequest.Block)) + vaa.MustWrite(buf, binary.BigEndian, uint8(len(req.EthCallQueryRequest.CallData))) + for _, callData := range req.EthCallQueryRequest.CallData { + buf.Write(callData.To) + vaa.MustWrite(buf, binary.BigEndian, uint32(len(callData.Data))) + buf.Write(callData.Data) + } + default: + return nil, fmt.Errorf("invalid request type") + } + return buf.Bytes(), nil +} + +// UnmarshalQueryRequest deserializes the binary representation of a query request from a byte array +func UnmarshalQueryRequest(data []byte) (*gossipv1.QueryRequest, error) { + reader := bytes.NewReader(data[:]) + return UnmarshalQueryRequestFromReader(reader) +} + +// UnmarshalQueryRequestFromReader deserializes the binary representation of a query request from an existing reader +func UnmarshalQueryRequestFromReader(reader *bytes.Reader) (*gossipv1.QueryRequest, error) { + queryRequest := &gossipv1.QueryRequest{} + + queryNonce := uint32(0) + if err := binary.Read(reader, binary.BigEndian, &queryNonce); err != nil { + return nil, fmt.Errorf("failed to read request nonce: %w", err) + } + queryRequest.Nonce = queryNonce + + numPerChainQueries := uint8(0) + if err := binary.Read(reader, 
binary.BigEndian, &numPerChainQueries); err != nil { + return nil, fmt.Errorf("failed to read number of per chain queries: %w", err) + } + + for count := 0; count < int(numPerChainQueries); count++ { + perChainQuery, err := UnmarshalPerChainQueryRequestFromReader(reader) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal per chain query: %w", err) + } + queryRequest.PerChainQueries = append(queryRequest.PerChainQueries, perChainQuery) + } + + return queryRequest, nil +} + +// UnmarshalPerChainQueryRequest deserializes the binary representation of a per chain query request from a byte array +func UnmarshalPerChainQueryRequest(data []byte) (*gossipv1.PerChainQueryRequest, error) { + reader := bytes.NewReader(data[:]) + return UnmarshalPerChainQueryRequestFromReader(reader) +} + +// UnmarshalPerChainQueryRequestFromReader deserializes the binary representation of a per chain query request from an existing reader +func UnmarshalPerChainQueryRequestFromReader(reader *bytes.Reader) (*gossipv1.PerChainQueryRequest, error) { + perChainQuery := &gossipv1.PerChainQueryRequest{} + + requestType := uint8(0) + if err := binary.Read(reader, binary.BigEndian, &requestType); err != nil { + return nil, fmt.Errorf("failed to read request chain: %w", err) + } + if requestType != QUERY_REQUEST_TYPE_ETH_CALL { + // TODO: support reading different types of request/response pairs + return nil, fmt.Errorf("unsupported request type: %d", requestType) + } + + queryChain := vaa.ChainID(0) + if err := binary.Read(reader, binary.BigEndian, &queryChain); err != nil { + return nil, fmt.Errorf("failed to read request chain: %w", err) + } + perChainQuery.ChainId = uint32(queryChain) + + ethCallQueryRequest := &gossipv1.EthCallQueryRequest{} + + queryEthCallBlockLen := uint32(0) + if err := binary.Read(reader, binary.BigEndian, &queryEthCallBlockLen); err != nil { + return nil, fmt.Errorf("failed to read call Data len: %w", err) + } + queryEthCallBlockBytes := make([]byte, queryEthCallBlockLen) + if n, err := reader.Read(queryEthCallBlockBytes[:]); err != nil || n != int(queryEthCallBlockLen) { + return nil, fmt.Errorf("failed to read call To [%d]: %w", n, err) + } + ethCallQueryRequest.Block = string(queryEthCallBlockBytes[:]) + + numCallData := uint8(0) + if err := binary.Read(reader, binary.BigEndian, &numCallData); err != nil { + return nil, fmt.Errorf("failed to read number of call data entries: %w", err) + } + + for count := 0; count < int(numCallData); count++ { + queryEthCallTo := [EvmContractAddressLength]byte{} + if n, err := reader.Read(queryEthCallTo[:]); err != nil || n != EvmContractAddressLength { + return nil, fmt.Errorf("failed to read call To [%d]: %w", n, err) + } + + queryEthCallDataLen := uint32(0) + if err := binary.Read(reader, binary.BigEndian, &queryEthCallDataLen); err != nil { + return nil, fmt.Errorf("failed to read call Data len: %w", err) + } + queryEthCallData := make([]byte, queryEthCallDataLen) + if n, err := reader.Read(queryEthCallData[:]); err != nil || n != int(queryEthCallDataLen) { + return nil, fmt.Errorf("failed to read call To [%d]: %w", n, err) + } + + callData := &gossipv1.EthCallQueryRequest_EthCallData{ + To: queryEthCallTo[:], + Data: queryEthCallData[:], + } + + ethCallQueryRequest.CallData = append(ethCallQueryRequest.CallData, callData) + } + + perChainQuery.Message = &gossipv1.PerChainQueryRequest_EthCallQueryRequest{ + EthCallQueryRequest: ethCallQueryRequest, + } + + return perChainQuery, nil +} + // ValidateQueryRequest does basic validation on a received 
query request. func ValidateQueryRequest(queryRequest *gossipv1.QueryRequest) error { - if queryRequest.ChainId > math.MaxUint16 { - return fmt.Errorf("invalid chain id: %d is out of bounds", queryRequest.ChainId) + if len(queryRequest.PerChainQueries) == 0 { + return fmt.Errorf("request does not contain any queries") } - switch req := queryRequest.Message.(type) { - case *gossipv1.QueryRequest_EthCallQueryRequest: - if len(req.EthCallQueryRequest.To) != 20 { - return fmt.Errorf("invalid length for To contract") + for _, perChainQuery := range queryRequest.PerChainQueries { + if perChainQuery.ChainId > math.MaxUint16 { + return fmt.Errorf("invalid chain id: %d is out of bounds", perChainQuery.ChainId) } - if len(req.EthCallQueryRequest.Data) > math.MaxUint32 { - return fmt.Errorf("request data too long") + switch req := perChainQuery.Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + if len(req.EthCallQueryRequest.Block) > math.MaxUint32 { + return fmt.Errorf("request block too long") + } + if !strings.HasPrefix(req.EthCallQueryRequest.Block, "0x") { + return fmt.Errorf("request block must be a hex number or hash starting with 0x") + } + if len(req.EthCallQueryRequest.CallData) == 0 { + return fmt.Errorf("per chain query does not contain any requests") + } + for _, callData := range req.EthCallQueryRequest.CallData { + if len(callData.To) != EvmContractAddressLength { + return fmt.Errorf("invalid length for To contract") + } + if len(callData.Data) > math.MaxUint32 { + return fmt.Errorf("request data too long") + } + } + default: + return fmt.Errorf("received invalid message from query module") } - if len(req.EthCallQueryRequest.Block) > math.MaxUint32 { - return fmt.Errorf("request block too long") + } + + return nil +} + +func SignedQueryRequestEqual(left *gossipv1.SignedQueryRequest, right *gossipv1.SignedQueryRequest) bool { + if !bytes.Equal(left.QueryRequest, right.QueryRequest) { + return false + } + if !bytes.Equal(left.Signature, right.Signature) { + return false + } + return true +} + +func QueryRequestEqual(left *gossipv1.QueryRequest, right *gossipv1.QueryRequest) bool { + if left.Nonce != right.Nonce { + return false + } + if len(left.PerChainQueries) != len(right.PerChainQueries) { + return false + } + + for idx := range left.PerChainQueries { + if left.PerChainQueries[idx].ChainId != right.PerChainQueries[idx].ChainId { + return false } - if !strings.HasPrefix(req.EthCallQueryRequest.Block, "0x") { - return fmt.Errorf("request block must be a hex number or hash starting with 0x") + + switch reqLeft := left.PerChainQueries[idx].Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + switch reqRight := right.PerChainQueries[idx].Message.(type) { + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: + if reqLeft.EthCallQueryRequest.Block != reqRight.EthCallQueryRequest.Block { + return false + } + if len(reqLeft.EthCallQueryRequest.CallData) != len(reqRight.EthCallQueryRequest.CallData) { + return false + } + for idx := range reqLeft.EthCallQueryRequest.CallData { + if !bytes.Equal(reqLeft.EthCallQueryRequest.CallData[idx].To, reqRight.EthCallQueryRequest.CallData[idx].To) { + return false + } + if !bytes.Equal(reqLeft.EthCallQueryRequest.CallData[idx].Data, reqRight.EthCallQueryRequest.CallData[idx].Data) { + return false + } + } + default: + return false + } + default: + return false } - default: - return fmt.Errorf("received invalid message from query module") } - return nil + return true } diff --git 
a/node/pkg/common/queryResponse.go b/node/pkg/common/queryResponse.go index ec148ca2e2..c8b2c3ac22 100644 --- a/node/pkg/common/queryResponse.go +++ b/node/pkg/common/queryResponse.go @@ -30,37 +30,49 @@ const ( QueryFatalError QueryStatus = -1 ) -type QueryResponse struct { - RequestID string - ChainID vaa.ChainID - Status QueryStatus - SignedRequest *gossipv1.SignedQueryRequest - Result *EthCallQueryResponse +// This is the query response returned from the watcher to the query handler. +type PerChainQueryResponseInternal struct { + RequestID string + RequestIdx int + ChainID vaa.ChainID + Status QueryStatus + Results []EthCallQueryResponse } -func CreateQueryResponse(req *QueryRequest, status QueryStatus, result *EthCallQueryResponse) *QueryResponse { - return &QueryResponse{ - RequestID: req.RequestID, - ChainID: vaa.ChainID(req.Request.ChainId), - SignedRequest: req.SignedRequest, - Status: status, - Result: result, +// CreatePerChainQueryResponseInternal creates a PerChainQueryResponseInternal and returns a pointer to it. +func CreatePerChainQueryResponseInternal(reqId string, reqIdx int, chainID vaa.ChainID, status QueryStatus, results []EthCallQueryResponse) *PerChainQueryResponseInternal { + return &PerChainQueryResponseInternal{ + RequestID: reqId, + RequestIdx: reqIdx, + ChainID: chainID, + Status: status, + Results: results, } } var queryResponsePrefix = []byte("query_response_0000000000000000000|") +type QueryResponsePublication struct { + Request *gossipv1.SignedQueryRequest + PerChainResponses []PerChainQueryResponse +} + +type PerChainQueryResponse struct { + ChainID uint32 + Responses []EthCallQueryResponse +} + type EthCallQueryResponse struct { Number *big.Int Hash common.Hash Time time.Time Result []byte + // NOTE: If you modify this struct, please update the Equal() method for QueryResponsePublication. } -type QueryResponsePublication struct { - Request *gossipv1.SignedQueryRequest - Response EthCallQueryResponse -} +const ( + QUERY_REQUEST_TYPE_ETH_CALL = uint8(1) +) func (resp *QueryResponsePublication) RequestID() string { if resp == nil || resp.Request == nil { @@ -69,8 +81,8 @@ func (resp *QueryResponsePublication) RequestID() string { return hex.EncodeToString(resp.Request.Signature) } -// Marshal serializes the binary representation of a query response -func (msg *QueryResponsePublication) Marshal() ([]byte, error) { +// MarshalQueryResponsePublication serializes the binary representation of a query response +func MarshalQueryResponsePublication(msg *QueryResponsePublication) ([]byte, error) { // TODO: copy request write checks to query module request handling // TODO: only receive the unmarshalled query request (see note in query.go) var queryRequest gossipv1.QueryRequest @@ -79,15 +91,15 @@ func (msg *QueryResponsePublication) Marshal() ([]byte, error) { return nil, fmt.Errorf("received invalid message from query module") } + // Validate things before we start marshalling. 
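
(Editor's note, not part of the diff.) The custom binary encoding introduced in queryRequest.go above, and embedded again inside the response publication marshalled in this file, can be hard to follow from the marshal/unmarshal code alone. The standard-library sketch below spells out the byte layout; field order and widths are taken from MarshalQueryRequest and MarshalPerChainQueryRequest, while the type and function names here are invented for illustration.

package ccqsketch

import (
	"bytes"
	"encoding/binary"
)

type ethCallData struct {
	To   []byte // 20-byte EVM contract address (EvmContractAddressLength)
	Data []byte
}

// encodeEthCallQueryRequest mirrors the layout written by MarshalQueryRequest and
// MarshalPerChainQueryRequest for a request with a single per-chain query:
//
//	uint32 nonce
//	uint8  number of per-chain queries, then for each query:
//	  uint8  query type (1 = QUERY_REQUEST_TYPE_ETH_CALL)
//	  uint16 chain ID
//	  uint32 block string length, followed by the block string bytes
//	  uint8  number of call-data entries, then for each entry:
//	    20-byte To address, uint32 data length, data bytes
func encodeEthCallQueryRequest(nonce uint32, chainID uint16, block string, calls []ethCallData) []byte {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, nonce)
	buf.WriteByte(1) // one per-chain query in this sketch
	buf.WriteByte(1) // QUERY_REQUEST_TYPE_ETH_CALL
	binary.Write(buf, binary.BigEndian, chainID)
	binary.Write(buf, binary.BigEndian, uint32(len(block)))
	buf.WriteString(block)
	buf.WriteByte(uint8(len(calls)))
	for _, c := range calls {
		buf.Write(c.To)
		binary.Write(buf, binary.BigEndian, uint32(len(c.Data)))
		buf.Write(c.Data)
	}
	return buf.Bytes()
}
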
if err := ValidateQueryRequest(&queryRequest); err != nil { return nil, fmt.Errorf("queryRequest is invalid: %w", err) } - if len(msg.Response.Hash) != 32 { - return nil, fmt.Errorf("invalid length for block hash") - } - if len(msg.Response.Result) > math.MaxUint32 { - return nil, fmt.Errorf("response data too long") + for idx := range msg.PerChainResponses { + if err := ValidatePerChainResponse(&msg.PerChainResponses[idx]); err != nil { + return nil, fmt.Errorf("invalid per chain response: %w", err) + } } buf := new(bytes.Buffer) @@ -99,31 +111,56 @@ func (msg *QueryResponsePublication) Marshal() ([]byte, error) { buf.Write(msg.Request.Signature[:]) // Request - // TODO: support writing different types of request/response pairs - switch req := queryRequest.Message.(type) { - case *gossipv1.QueryRequest_EthCallQueryRequest: - vaa.MustWrite(buf, binary.BigEndian, uint8(1)) - vaa.MustWrite(buf, binary.BigEndian, uint16(queryRequest.ChainId)) - vaa.MustWrite(buf, binary.BigEndian, queryRequest.Nonce) // uint32 - buf.Write(req.EthCallQueryRequest.To) - vaa.MustWrite(buf, binary.BigEndian, uint32(len(req.EthCallQueryRequest.Data))) - buf.Write(req.EthCallQueryRequest.Data) - vaa.MustWrite(buf, binary.BigEndian, uint32(len(req.EthCallQueryRequest.Block))) - // TODO: should this be an enum or the literal string? - buf.Write([]byte(req.EthCallQueryRequest.Block)) - - // Response - // TODO: probably some kind of request/response pair validation - // TODO: is uint64 safe? - vaa.MustWrite(buf, binary.BigEndian, msg.Response.Number.Uint64()) - buf.Write(msg.Response.Hash[:]) - vaa.MustWrite(buf, binary.BigEndian, uint32(msg.Response.Time.Unix())) - vaa.MustWrite(buf, binary.BigEndian, uint32(len(msg.Response.Result))) - buf.Write(msg.Response.Result) - return buf.Bytes(), nil - default: - return nil, fmt.Errorf("received invalid message from query module") + qrBuf, err := MarshalQueryRequest(&queryRequest) + if err != nil { + return nil, fmt.Errorf("failed to marshal query request") + } + buf.Write(qrBuf) + + // Per chain responses + vaa.MustWrite(buf, binary.BigEndian, uint8(len(msg.PerChainResponses))) + for idx := range msg.PerChainResponses { + pcrBuf, err := MarshalPerChainResponse(&msg.PerChainResponses[idx]) + if err != nil { + return nil, fmt.Errorf("failed to marshal per chain response: %w", err) + } + buf.Write(pcrBuf) + } + + return buf.Bytes(), nil +} + +// MarshalPerChainResponse marshalls a per chain query response. +func MarshalPerChainResponse(pcr *PerChainQueryResponse) ([]byte, error) { + buf := new(bytes.Buffer) + vaa.MustWrite(buf, binary.BigEndian, pcr.ChainID) + vaa.MustWrite(buf, binary.BigEndian, uint8(len(pcr.Responses))) + for _, resp := range pcr.Responses { + vaa.MustWrite(buf, binary.BigEndian, resp.Number.Uint64()) + buf.Write(resp.Hash[:]) + vaa.MustWrite(buf, binary.BigEndian, resp.Time.UnixMicro()) + vaa.MustWrite(buf, binary.BigEndian, uint32(len(resp.Result))) + buf.Write(resp.Result) } + return buf.Bytes(), nil +} + +// ValidatePerChainResponse performs basic validation on a per chain query response. 
+func ValidatePerChainResponse(pcr *PerChainQueryResponse) error { + if pcr.ChainID > math.MaxUint16 { + return fmt.Errorf("invalid chain ID") + } + + for _, resp := range pcr.Responses { + if len(resp.Hash) != 32 { + return fmt.Errorf("invalid length for block hash") + } + if len(resp.Result) > math.MaxUint32 { + return fmt.Errorf("response data too long") + } + } + + return nil } // Unmarshal deserializes the binary representation of a query response @@ -153,59 +190,11 @@ func UnmarshalQueryResponsePublication(data []byte) (*QueryResponsePublication, } signedQueryRequest.Signature = signature[:] - requestType := uint8(0) - if err := binary.Read(reader, binary.BigEndian, &requestType); err != nil { - return nil, fmt.Errorf("failed to read request chain: %w", err) - } - if requestType != 1 { - // TODO: support reading different types of request/response pairs - return nil, fmt.Errorf("unsupported request type: %d", requestType) - } - - queryRequest := &gossipv1.QueryRequest{} - queryChain := vaa.ChainID(0) - if err := binary.Read(reader, binary.BigEndian, &queryChain); err != nil { - return nil, fmt.Errorf("failed to read request chain: %w", err) - } - queryRequest.ChainId = uint32(queryChain) - - queryNonce := uint32(0) - if err := binary.Read(reader, binary.BigEndian, &queryNonce); err != nil { - return nil, fmt.Errorf("failed to read request nonce: %w", err) - } - queryRequest.Nonce = queryNonce - - ethCallQueryRequest := &gossipv1.EthCallQueryRequest{} - - queryEthCallTo := [20]byte{} - if n, err := reader.Read(queryEthCallTo[:]); err != nil || n != 20 { - return nil, fmt.Errorf("failed to read call To [%d]: %w", n, err) - } - ethCallQueryRequest.To = queryEthCallTo[:] - - queryEthCallDataLen := uint32(0) - if err := binary.Read(reader, binary.BigEndian, &queryEthCallDataLen); err != nil { - return nil, fmt.Errorf("failed to read call Data len: %w", err) - } - queryEthCallData := make([]byte, queryEthCallDataLen) - if n, err := reader.Read(queryEthCallData[:]); err != nil || n != int(queryEthCallDataLen) { - return nil, fmt.Errorf("failed to read call To [%d]: %w", n, err) - } - ethCallQueryRequest.Data = queryEthCallData[:] - - queryEthCallBlockLen := uint32(0) - if err := binary.Read(reader, binary.BigEndian, &queryEthCallBlockLen); err != nil { - return nil, fmt.Errorf("failed to read call Data len: %w", err) - } - queryEthCallBlockBytes := make([]byte, queryEthCallBlockLen) - if n, err := reader.Read(queryEthCallBlockBytes[:]); err != nil || n != int(queryEthCallBlockLen) { - return nil, fmt.Errorf("failed to read call To [%d]: %w", n, err) + queryRequest, err := UnmarshalQueryRequestFromReader(reader) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query request: %w", err) } - ethCallQueryRequest.Block = string(queryEthCallBlockBytes[:]) - queryRequest.Message = &gossipv1.QueryRequest_EthCallQueryRequest{ - EthCallQueryRequest: ethCallQueryRequest, - } queryRequestBytes, err := proto.Marshal(queryRequest) if err != nil { return nil, err @@ -214,41 +203,73 @@ func UnmarshalQueryResponsePublication(data []byte) (*QueryResponsePublication, msg.Request = signedQueryRequest - // Response - queryResponse := EthCallQueryResponse{} - - responseNumber := uint64(0) - if err := binary.Read(reader, binary.BigEndian, &responseNumber); err != nil { - return nil, fmt.Errorf("failed to read response number: %w", err) + // Responses + numPerChainResponses := uint8(0) + if err := binary.Read(reader, binary.BigEndian, &numPerChainResponses); err != nil { + return nil, 
fmt.Errorf("failed to read number of per chain responses: %w", err) } - responseNumberBig := big.NewInt(0).SetUint64(responseNumber) - queryResponse.Number = responseNumberBig - responseHash := common.Hash{} - if n, err := reader.Read(responseHash[:]); err != nil || n != 32 { - return nil, fmt.Errorf("failed to read response hash [%d]: %w", n, err) + for count := 0; count < int(numPerChainResponses); count++ { + pcr, err := UnmarshalQueryPerChainResponseFromReader(reader) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal per chain response: %w", err) + } + msg.PerChainResponses = append(msg.PerChainResponses, *pcr) } - queryResponse.Hash = responseHash - unixSeconds := uint32(0) - if err := binary.Read(reader, binary.BigEndian, &unixSeconds); err != nil { - return nil, fmt.Errorf("failed to read response timestamp: %w", err) - } - queryResponse.Time = time.Unix(int64(unixSeconds), 0) + return msg, nil +} + +func UnmarshalQueryPerChainResponseFromReader(reader *bytes.Reader) (*PerChainQueryResponse, error) { + pcr := PerChainQueryResponse{} - responseResultLen := uint32(0) - if err := binary.Read(reader, binary.BigEndian, &responseResultLen); err != nil { - return nil, fmt.Errorf("failed to read response len: %w", err) + chainID := uint32(0) + if err := binary.Read(reader, binary.BigEndian, &chainID); err != nil { + return nil, fmt.Errorf("failed to read chain ID: %w", err) } - responseResult := make([]byte, responseResultLen) - if n, err := reader.Read(responseResult[:]); err != nil || n != int(responseResultLen) { - return nil, fmt.Errorf("failed to read result [%d]: %w", n, err) + pcr.ChainID = chainID + + numResponses := uint8(0) + if err := binary.Read(reader, binary.BigEndian, &numResponses); err != nil { + return nil, fmt.Errorf("failed to read number of responses: %w", err) } - queryResponse.Result = responseResult[:] - msg.Response = queryResponse + for count := 0; count < int(numResponses); count++ { + queryResponse := EthCallQueryResponse{} + + responseNumber := uint64(0) + if err := binary.Read(reader, binary.BigEndian, &responseNumber); err != nil { + return nil, fmt.Errorf("failed to read response number: %w", err) + } + responseNumberBig := big.NewInt(0).SetUint64(responseNumber) + queryResponse.Number = responseNumberBig + + responseHash := common.Hash{} + if n, err := reader.Read(responseHash[:]); err != nil || n != 32 { + return nil, fmt.Errorf("failed to read response hash [%d]: %w", n, err) + } + queryResponse.Hash = responseHash + + unixMicros := int64(0) + if err := binary.Read(reader, binary.BigEndian, &unixMicros); err != nil { + return nil, fmt.Errorf("failed to read response timestamp: %w", err) + } + queryResponse.Time = time.UnixMicro(unixMicros) + + responseResultLen := uint32(0) + if err := binary.Read(reader, binary.BigEndian, &responseResultLen); err != nil { + return nil, fmt.Errorf("failed to read response len: %w", err) + } + responseResult := make([]byte, responseResultLen) + if n, err := reader.Read(responseResult[:]); err != nil || n != int(responseResultLen) { + return nil, fmt.Errorf("failed to read result [%d]: %w", n, err) + } + queryResponse.Result = responseResult[:] + + pcr.Responses = append(pcr.Responses, queryResponse) + } - return msg, nil + return &pcr, nil } // Similar to sdk/vaa/structs.go, @@ -256,13 +277,55 @@ func UnmarshalQueryResponsePublication(data []byte) (*QueryResponsePublication, // the first hash (32 bytes) vs the full body data. // TODO: confirm if this works / is worthwhile. 
func (msg *QueryResponsePublication) SigningDigest() (common.Hash, error) { - msgBytes, err := msg.Marshal() + msgBytes, err := MarshalQueryResponsePublication(msg) if err != nil { return common.Hash{}, err } return GetQueryResponseDigestFromBytes(msgBytes), nil } +// GetQueryResponseDigestFromBytes computes the digest bytes for a query response byte array. func GetQueryResponseDigestFromBytes(b []byte) common.Hash { return crypto.Keccak256Hash(append(queryResponsePrefix, crypto.Keccak256Hash(b).Bytes()...)) } + +// Equal checks for equality on two query response publications. +func (left *QueryResponsePublication) Equal(right *QueryResponsePublication) bool { + if !bytes.Equal(left.Request.QueryRequest, right.Request.QueryRequest) || !bytes.Equal(left.Request.Signature, right.Request.Signature) { + return false + } + if len(left.PerChainResponses) != len(right.PerChainResponses) { + return false + } + for idx := range left.PerChainResponses { + if !left.PerChainResponses[idx].Equal(&right.PerChainResponses[idx]) { + return false + } + } + return true +} + +// Equal checks for equality on two per chain query responses. +func (left *PerChainQueryResponse) Equal(right *PerChainQueryResponse) bool { + if left.ChainID != right.ChainID { + return false + } + if len(left.Responses) != len(right.Responses) { + return false + } + for idx := range left.Responses { + if left.Responses[idx].Number.Cmp(right.Responses[idx].Number) != 0 { + return false + } + if !bytes.Equal(left.Responses[idx].Hash.Bytes(), right.Responses[idx].Hash.Bytes()) { + return false + } + if left.Responses[idx].Time != right.Responses[idx].Time { + return false + } + if !bytes.Equal(left.Responses[idx].Result, right.Responses[idx].Result) { + return false + } + } + return true +} diff --git a/node/pkg/common/query_test.go b/node/pkg/common/query_test.go new file mode 100644 index 0000000000..9004ff9ee4 --- /dev/null +++ b/node/pkg/common/query_test.go @@ -0,0 +1,181 @@ +package common + +import ( + "encoding/hex" + "math/big" + "strings" + "testing" + "time" + + gossipv1 "github.com/certusone/wormhole/node/pkg/proto/gossip/v1" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/accounts/abi" + ethCommon "github.com/ethereum/go-ethereum/common" + + "google.golang.org/protobuf/proto" +) + +func createQueryRequestForTesting() *gossipv1.QueryRequest { + // Create a query request. 
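
(Editor's note, not part of the diff.) SigningDigest and GetQueryResponseDigestFromBytes above define the prefixed double-keccak digest that guardians sign over a response. The sketch below shows one way the "verify response signature" TODO in send_req.go could be addressed, by recomputing that digest and recovering the signer address from the 65-byte signature; this is an illustration under assumptions, not code from this change.

package ccqsketch

import (
	"github.com/certusone/wormhole/node/pkg/common"
	ethCommon "github.com/ethereum/go-ethereum/common"
	ethCrypto "github.com/ethereum/go-ethereum/crypto"
)

// responseSignerAddress recovers the address that signed a marshalled query response.
func responseSignerAddress(marshaledResponse []byte, signature []byte) (ethCommon.Address, error) {
	// Recompute keccak256(prefix || keccak256(body)) exactly as GetQueryResponseDigestFromBytes does.
	digest := common.GetQueryResponseDigestFromBytes(marshaledResponse)
	// Recover the public key from the recoverable ECDSA signature and derive the address.
	pub, err := ethCrypto.SigToPub(digest.Bytes(), signature)
	if err != nil {
		return ethCommon.Address{}, err
	}
	return ethCrypto.PubkeyToAddress(*pub), nil
}

The recovered address could then be checked against the current guardian set before a client trusts the response.
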
+ wethAbi, err := abi.JSON(strings.NewReader("[{\"constant\":true,\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"}]")) + if err != nil { + panic(err) + } + + data1, err := wethAbi.Pack("name") + if err != nil { + panic(err) + } + data2, err := wethAbi.Pack("totalSupply") + if err != nil { + panic(err) + } + + to, _ := hex.DecodeString("0d500b1d8e8ef31e21c99d1db9a6444d3adf1270") + block := "0x28d9630" + callData := []*gossipv1.EthCallQueryRequest_EthCallData{ + { + To: to, + Data: data1, + }, + { + To: to, + Data: data2, + }, + } + callRequest := &gossipv1.EthCallQueryRequest{ + Block: block, + CallData: callData, + } + + perChainQuery := &gossipv1.PerChainQueryRequest{ + ChainId: 5, + Message: &gossipv1.PerChainQueryRequest_EthCallQueryRequest{ + EthCallQueryRequest: callRequest, + }, + } + + queryRequest := &gossipv1.QueryRequest{ + Nonce: 1, + PerChainQueries: []*gossipv1.PerChainQueryRequest{perChainQuery}, + } + + return queryRequest +} + +// A timestamp has nanos, but we only marshal down to micros, so trim our time to micros for testing purposes. +func timeForTest(t time.Time) time.Time { + return time.UnixMicro(t.UnixMicro()) +} + +func TestQueryRequestProtoMarshalUnMarshal(t *testing.T) { + queryRequest := createQueryRequestForTesting() + queryRequestBytes, err := proto.Marshal(queryRequest) + require.NoError(t, err) + + var queryRequest2 gossipv1.QueryRequest + err = proto.Unmarshal(queryRequestBytes, &queryRequest2) + require.NoError(t, err) + + assert.True(t, QueryRequestEqual(queryRequest, &queryRequest2)) +} + +func TestQueryRequestMarshalUnMarshal(t *testing.T) { + queryRequest := createQueryRequestForTesting() + queryRequestBytes, err := MarshalQueryRequest(queryRequest) + require.NoError(t, err) + + queryRequest2, err := UnmarshalQueryRequest(queryRequestBytes) + require.NoError(t, err) + + assert.True(t, QueryRequestEqual(queryRequest, queryRequest2)) +} + +func TestQueryResponseMarshalUnMarshal(t *testing.T) { + queryRequest := createQueryRequestForTesting() + queryRequestBytes, err := proto.Marshal(queryRequest) + require.NoError(t, err) + + sig := [65]byte{} + signedQueryRequest := &gossipv1.SignedQueryRequest{ + QueryRequest: queryRequestBytes, + Signature: sig[:], + } + + results, err := hex.DecodeString("010203040506070809") + require.NoError(t, err) + + respPub := &QueryResponsePublication{ + Request: signedQueryRequest, + PerChainResponses: []PerChainQueryResponse{ + { + ChainID: 5, + Responses: []EthCallQueryResponse{ + { + Number: big.NewInt(42), + Hash: ethCommon.HexToHash("0x9999bac44d09a7f69ee7941819b0a19c59ccb1969640cc513be09ef95ed2d8e2"), + Time: timeForTest(time.Now()), + Result: results, + }, + { + Number: big.NewInt(43), + Hash: ethCommon.HexToHash("0x9999bac44d09a7f69ee7941819b0a19c59ccb1969640cc513be09ef9deadbeef"), + Time: timeForTest(time.Now()), + Result: results, + }, + }, + }, + { + ChainID: 11, + Responses: []EthCallQueryResponse{ + { + Number: big.NewInt(44), + Hash: ethCommon.HexToHash("0x9999bac44d09a7f69ee7941819b0a19c59ccb1969640cc513be09ef95ed2d8e3"), + Time: timeForTest(time.Now()), + Result: results, + }, + }, + }, + }, + } + + respPubBytes, err := MarshalQueryResponsePublication(respPub) + require.NoError(t, err) + + respPub2, err := 
UnmarshalQueryResponsePublication(respPubBytes) + require.NoError(t, err) + require.NotNil(t, respPub2) + + assert.True(t, respPub.Equal(respPub2)) +} + +/* +func TesMarshalUnMarshalQueryResponseWithNoResults(t *testing.T) { + queryRequest := createQueryRequestForTesting() + queryRequestBytes, err := proto.Marshal(queryRequest) + require.NoError(t, err) + + sig := [65]byte{} + signedQueryRequest := &gossipv1.SignedQueryRequest{ + QueryRequest: queryRequestBytes, + Signature: sig[:], + } + + respPub := &QueryResponsePublication{ + Request: signedQueryRequest, + Responses: nil, + } + + respPubBytes, err := MarshalQueryResponsePublication(respPub) + require.NoError(t, err) + + respPub2, err := UnmarshalQueryResponsePublication(respPubBytes) + require.NoError(t, err) + require.NotNil(t, respPub2) + + assert.True(t, respPub.Equal(respPub2)) +} +*/ diff --git a/node/pkg/p2p/p2p.go b/node/pkg/p2p/p2p.go index f3179684c8..c2c44bae58 100644 --- a/node/pkg/p2p/p2p.go +++ b/node/pkg/p2p/p2p.go @@ -422,7 +422,7 @@ func Run( logger.Error("received a cross chain query response when the feature is disabled, dropping it", zap.String("component", "ccqp2p")) continue } - msgBytes, err := msg.Marshal() + msgBytes, err := node_common.MarshalQueryResponsePublication(msg) if err != nil { logger.Error("failed to marshal query response", zap.Error(err), zap.String("component", "ccqp2p")) continue diff --git a/node/pkg/proto/gossip/v1/gossip.pb.go b/node/pkg/proto/gossip/v1/gossip.pb.go index 134c798fcc..04691f6769 100644 --- a/node/pkg/proto/gossip/v1/gossip.pb.go +++ b/node/pkg/proto/gossip/v1/gossip.pb.go @@ -1217,12 +1217,8 @@ type QueryRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId uint32 `protobuf:"varint,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Nonce uint32 `protobuf:"varint,2,opt,name=nonce,proto3" json:"nonce,omitempty"` - // Types that are assignable to Message: - // - // *QueryRequest_EthCallQueryRequest - Message isQueryRequest_Message `protobuf_oneof:"message"` + Nonce uint32 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` + PerChainQueries []*PerChainQueryRequest `protobuf:"bytes,2,rep,name=per_chain_queries,json=perChainQueries,proto3" json:"per_chain_queries,omitempty"` } func (x *QueryRequest) Reset() { @@ -1257,58 +1253,108 @@ func (*QueryRequest) Descriptor() ([]byte, []int) { return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{14} } -func (x *QueryRequest) GetChainId() uint32 { +func (x *QueryRequest) GetNonce() uint32 { if x != nil { - return x.ChainId + return x.Nonce } return 0 } -func (x *QueryRequest) GetNonce() uint32 { +func (x *QueryRequest) GetPerChainQueries() []*PerChainQueryRequest { if x != nil { - return x.Nonce + return x.PerChainQueries + } + return nil +} + +type PerChainQueryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChainId uint32 `protobuf:"varint,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Types that are assignable to Message: + // + // *PerChainQueryRequest_EthCallQueryRequest + Message isPerChainQueryRequest_Message `protobuf_oneof:"message"` +} + +func (x *PerChainQueryRequest) Reset() { + *x = PerChainQueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gossip_v1_gossip_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PerChainQueryRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*PerChainQueryRequest) ProtoMessage() {} + +func (x *PerChainQueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_gossip_v1_gossip_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PerChainQueryRequest.ProtoReflect.Descriptor instead. +func (*PerChainQueryRequest) Descriptor() ([]byte, []int) { + return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{15} +} + +func (x *PerChainQueryRequest) GetChainId() uint32 { + if x != nil { + return x.ChainId } return 0 } -func (m *QueryRequest) GetMessage() isQueryRequest_Message { +func (m *PerChainQueryRequest) GetMessage() isPerChainQueryRequest_Message { if m != nil { return m.Message } return nil } -func (x *QueryRequest) GetEthCallQueryRequest() *EthCallQueryRequest { - if x, ok := x.GetMessage().(*QueryRequest_EthCallQueryRequest); ok { +func (x *PerChainQueryRequest) GetEthCallQueryRequest() *EthCallQueryRequest { + if x, ok := x.GetMessage().(*PerChainQueryRequest_EthCallQueryRequest); ok { return x.EthCallQueryRequest } return nil } -type isQueryRequest_Message interface { - isQueryRequest_Message() +type isPerChainQueryRequest_Message interface { + isPerChainQueryRequest_Message() } -type QueryRequest_EthCallQueryRequest struct { +type PerChainQueryRequest_EthCallQueryRequest struct { EthCallQueryRequest *EthCallQueryRequest `protobuf:"bytes,3,opt,name=eth_call_query_request,json=ethCallQueryRequest,proto3,oneof"` } -func (*QueryRequest_EthCallQueryRequest) isQueryRequest_Message() {} +func (*PerChainQueryRequest_EthCallQueryRequest) isPerChainQueryRequest_Message() {} type EthCallQueryRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - To []byte `protobuf:"bytes,1,opt,name=to,proto3" json:"to,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Block string `protobuf:"bytes,3,opt,name=block,proto3" json:"block,omitempty"` + Block string `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + CallData []*EthCallQueryRequest_EthCallData `protobuf:"bytes,2,rep,name=call_data,json=callData,proto3" json:"call_data,omitempty"` } func (x *EthCallQueryRequest) Reset() { *x = EthCallQueryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[15] + mi := &file_gossip_v1_gossip_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1321,7 +1367,7 @@ func (x *EthCallQueryRequest) String() string { func (*EthCallQueryRequest) ProtoMessage() {} func (x *EthCallQueryRequest) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[15] + mi := &file_gossip_v1_gossip_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1334,30 +1380,23 @@ func (x *EthCallQueryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EthCallQueryRequest.ProtoReflect.Descriptor instead. 
func (*EthCallQueryRequest) Descriptor() ([]byte, []int) { - return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{15} + return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{16} } -func (x *EthCallQueryRequest) GetTo() []byte { +func (x *EthCallQueryRequest) GetBlock() string { if x != nil { - return x.To + return x.Block } - return nil + return "" } -func (x *EthCallQueryRequest) GetData() []byte { +func (x *EthCallQueryRequest) GetCallData() []*EthCallQueryRequest_EthCallData { if x != nil { - return x.Data + return x.CallData } return nil } -func (x *EthCallQueryRequest) GetBlock() string { - if x != nil { - return x.Block - } - return "" -} - type SignedQueryResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1372,7 +1411,7 @@ type SignedQueryResponse struct { func (x *SignedQueryResponse) Reset() { *x = SignedQueryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[16] + mi := &file_gossip_v1_gossip_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1385,7 +1424,7 @@ func (x *SignedQueryResponse) String() string { func (*SignedQueryResponse) ProtoMessage() {} func (x *SignedQueryResponse) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[16] + mi := &file_gossip_v1_gossip_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1398,7 +1437,7 @@ func (x *SignedQueryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SignedQueryResponse.ProtoReflect.Descriptor instead. func (*SignedQueryResponse) Descriptor() ([]byte, []int) { - return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{16} + return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{17} } func (x *SignedQueryResponse) GetQueryResponse() []byte { @@ -1433,7 +1472,7 @@ type Heartbeat_Network struct { func (x *Heartbeat_Network) Reset() { *x = Heartbeat_Network{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[17] + mi := &file_gossip_v1_gossip_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1446,7 +1485,7 @@ func (x *Heartbeat_Network) String() string { func (*Heartbeat_Network) ProtoMessage() {} func (x *Heartbeat_Network) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[17] + mi := &file_gossip_v1_gossip_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1503,7 +1542,7 @@ type ChainGovernorConfig_Chain struct { func (x *ChainGovernorConfig_Chain) Reset() { *x = ChainGovernorConfig_Chain{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[18] + mi := &file_gossip_v1_gossip_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1516,7 +1555,7 @@ func (x *ChainGovernorConfig_Chain) String() string { func (*ChainGovernorConfig_Chain) ProtoMessage() {} func (x *ChainGovernorConfig_Chain) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[18] + mi := &file_gossip_v1_gossip_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1566,7 +1605,7 @@ type ChainGovernorConfig_Token struct { func (x *ChainGovernorConfig_Token) Reset() { *x = 
ChainGovernorConfig_Token{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[19] + mi := &file_gossip_v1_gossip_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1579,7 +1618,7 @@ func (x *ChainGovernorConfig_Token) String() string { func (*ChainGovernorConfig_Token) ProtoMessage() {} func (x *ChainGovernorConfig_Token) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[19] + mi := &file_gossip_v1_gossip_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1630,7 +1669,7 @@ type ChainGovernorStatus_EnqueuedVAA struct { func (x *ChainGovernorStatus_EnqueuedVAA) Reset() { *x = ChainGovernorStatus_EnqueuedVAA{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[20] + mi := &file_gossip_v1_gossip_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1643,7 +1682,7 @@ func (x *ChainGovernorStatus_EnqueuedVAA) String() string { func (*ChainGovernorStatus_EnqueuedVAA) ProtoMessage() {} func (x *ChainGovernorStatus_EnqueuedVAA) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[20] + mi := &file_gossip_v1_gossip_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1700,7 +1739,7 @@ type ChainGovernorStatus_Emitter struct { func (x *ChainGovernorStatus_Emitter) Reset() { *x = ChainGovernorStatus_Emitter{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[21] + mi := &file_gossip_v1_gossip_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1713,7 +1752,7 @@ func (x *ChainGovernorStatus_Emitter) String() string { func (*ChainGovernorStatus_Emitter) ProtoMessage() {} func (x *ChainGovernorStatus_Emitter) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[21] + mi := &file_gossip_v1_gossip_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1763,7 +1802,7 @@ type ChainGovernorStatus_Chain struct { func (x *ChainGovernorStatus_Chain) Reset() { *x = ChainGovernorStatus_Chain{} if protoimpl.UnsafeEnabled { - mi := &file_gossip_v1_gossip_proto_msgTypes[22] + mi := &file_gossip_v1_gossip_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1776,7 +1815,7 @@ func (x *ChainGovernorStatus_Chain) String() string { func (*ChainGovernorStatus_Chain) ProtoMessage() {} func (x *ChainGovernorStatus_Chain) ProtoReflect() protoreflect.Message { - mi := &file_gossip_v1_gossip_proto_msgTypes[22] + mi := &file_gossip_v1_gossip_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1813,6 +1852,61 @@ func (x *ChainGovernorStatus_Chain) GetEmitters() []*ChainGovernorStatus_Emitter return nil } +type EthCallQueryRequest_EthCallData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + To []byte `protobuf:"bytes,1,opt,name=to,proto3" json:"to,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *EthCallQueryRequest_EthCallData) Reset() { + *x = 
EthCallQueryRequest_EthCallData{} + if protoimpl.UnsafeEnabled { + mi := &file_gossip_v1_gossip_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EthCallQueryRequest_EthCallData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EthCallQueryRequest_EthCallData) ProtoMessage() {} + +func (x *EthCallQueryRequest_EthCallData) ProtoReflect() protoreflect.Message { + mi := &file_gossip_v1_gossip_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EthCallQueryRequest_EthCallData.ProtoReflect.Descriptor instead. +func (*EthCallQueryRequest_EthCallData) Descriptor() ([]byte, []int) { + return file_gossip_v1_gossip_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *EthCallQueryRequest_EthCallData) GetTo() []byte { + if x != nil { + return x.To + } + return nil +} + +func (x *EthCallQueryRequest_EthCallData) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + var File_gossip_v1_gossip_proto protoreflect.FileDescriptor var file_gossip_v1_gossip_proto_rawDesc = []byte{ @@ -2051,32 +2145,44 @@ var file_gossip_v1_gossip_proto_rawDesc = []byte{ 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0xa1, 0x01, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x16, 0x65, 0x74, 0x68, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x74, 0x68, 0x43, 0x61, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x13, 0x65, 0x74, 0x68, 0x43, 0x61, 0x6c, - 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x09, 0x0a, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x45, 0x74, 0x68, 0x43, - 0x61, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x5a, 0x0a, 0x13, 0x53, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x25, 0x0a, 0x0e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x75, 0x73, 0x6f, 0x6e, 0x65, 0x2f, 0x77, 0x6f, - 0x72, 0x6d, 0x68, 0x6f, 0x6c, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2f, 0x76, 0x31, 0x3b, - 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x75, 0x72, 0x65, 0x22, 0x71, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x70, 0x65, + 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x70, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x55, 0x0a, 0x16, 0x65, + 0x74, 0x68, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x73, 0x73, 0x69, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x74, 0x68, 0x43, 0x61, 0x6c, 0x6c, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x13, 0x65, + 0x74, 0x68, 0x43, 0x61, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xa7, 0x01, + 0x0a, 0x13, 0x45, 0x74, 0x68, 0x43, 0x61, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x47, 0x0a, 0x09, 0x63, + 0x61, 0x6c, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x74, 0x68, 0x43, 0x61, + 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, + 0x74, 0x68, 0x43, 0x61, 0x6c, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x31, 0x0a, 0x0b, 0x45, 0x74, 0x68, 0x43, 0x61, 0x6c, 0x6c, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x02, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x5a, 0x0a, 0x13, 0x53, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x75, 0x73, 0x6f, 0x6e, 0x65, 0x2f, 0x77, 0x6f, 0x72, 0x6d, + 0x68, 0x6f, 0x6c, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2f, 0x76, 0x31, 0x3b, 0x67, 0x6f, + 0x73, 0x73, 0x69, 0x70, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2091,7 +2197,7 @@ func file_gossip_v1_gossip_proto_rawDescGZIP() []byte { return file_gossip_v1_gossip_proto_rawDescData } -var file_gossip_v1_gossip_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_gossip_v1_gossip_proto_msgTypes = make([]protoimpl.MessageInfo, 25) var file_gossip_v1_gossip_proto_goTypes = []interface{}{ (*GossipMessage)(nil), // 0: gossip.v1.GossipMessage (*SignedHeartbeat)(nil), // 1: gossip.v1.SignedHeartbeat @@ -2108,14 +2214,16 @@ var file_gossip_v1_gossip_proto_goTypes = []interface{}{ (*ChainGovernorStatus)(nil), // 12: gossip.v1.ChainGovernorStatus (*SignedQueryRequest)(nil), // 13: gossip.v1.SignedQueryRequest (*QueryRequest)(nil), // 14: gossip.v1.QueryRequest - (*EthCallQueryRequest)(nil), // 15: gossip.v1.EthCallQueryRequest - (*SignedQueryResponse)(nil), // 16: gossip.v1.SignedQueryResponse - (*Heartbeat_Network)(nil), // 17: gossip.v1.Heartbeat.Network - (*ChainGovernorConfig_Chain)(nil), // 18: gossip.v1.ChainGovernorConfig.Chain - (*ChainGovernorConfig_Token)(nil), // 19: gossip.v1.ChainGovernorConfig.Token - (*ChainGovernorStatus_EnqueuedVAA)(nil), // 20: gossip.v1.ChainGovernorStatus.EnqueuedVAA - (*ChainGovernorStatus_Emitter)(nil), // 21: gossip.v1.ChainGovernorStatus.Emitter - (*ChainGovernorStatus_Chain)(nil), // 22: gossip.v1.ChainGovernorStatus.Chain + (*PerChainQueryRequest)(nil), // 15: gossip.v1.PerChainQueryRequest + (*EthCallQueryRequest)(nil), // 16: gossip.v1.EthCallQueryRequest + (*SignedQueryResponse)(nil), // 17: gossip.v1.SignedQueryResponse + (*Heartbeat_Network)(nil), // 18: gossip.v1.Heartbeat.Network + (*ChainGovernorConfig_Chain)(nil), // 19: gossip.v1.ChainGovernorConfig.Chain + (*ChainGovernorConfig_Token)(nil), // 20: gossip.v1.ChainGovernorConfig.Token + (*ChainGovernorStatus_EnqueuedVAA)(nil), // 21: gossip.v1.ChainGovernorStatus.EnqueuedVAA + (*ChainGovernorStatus_Emitter)(nil), // 22: gossip.v1.ChainGovernorStatus.Emitter + (*ChainGovernorStatus_Chain)(nil), // 23: gossip.v1.ChainGovernorStatus.Chain + (*EthCallQueryRequest_EthCallData)(nil), // 24: gossip.v1.EthCallQueryRequest.EthCallData } var file_gossip_v1_gossip_proto_depIdxs = []int32{ 3, // 0: gossip.v1.GossipMessage.signed_observation:type_name -> gossip.v1.SignedObservation @@ -2127,19 +2235,21 @@ var file_gossip_v1_gossip_proto_depIdxs = []int32{ 9, // 6: gossip.v1.GossipMessage.signed_chain_governor_config:type_name -> gossip.v1.SignedChainGovernorConfig 11, // 7: gossip.v1.GossipMessage.signed_chain_governor_status:type_name -> gossip.v1.SignedChainGovernorStatus 13, // 8: gossip.v1.GossipMessage.signed_query_request:type_name -> gossip.v1.SignedQueryRequest - 16, // 9: gossip.v1.GossipMessage.signed_query_response:type_name -> gossip.v1.SignedQueryResponse - 
17, // 10: gossip.v1.Heartbeat.networks:type_name -> gossip.v1.Heartbeat.Network - 18, // 11: gossip.v1.ChainGovernorConfig.chains:type_name -> gossip.v1.ChainGovernorConfig.Chain - 19, // 12: gossip.v1.ChainGovernorConfig.tokens:type_name -> gossip.v1.ChainGovernorConfig.Token - 22, // 13: gossip.v1.ChainGovernorStatus.chains:type_name -> gossip.v1.ChainGovernorStatus.Chain - 15, // 14: gossip.v1.QueryRequest.eth_call_query_request:type_name -> gossip.v1.EthCallQueryRequest - 20, // 15: gossip.v1.ChainGovernorStatus.Emitter.enqueued_vaas:type_name -> gossip.v1.ChainGovernorStatus.EnqueuedVAA - 21, // 16: gossip.v1.ChainGovernorStatus.Chain.emitters:type_name -> gossip.v1.ChainGovernorStatus.Emitter - 17, // [17:17] is the sub-list for method output_type - 17, // [17:17] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 17, // 9: gossip.v1.GossipMessage.signed_query_response:type_name -> gossip.v1.SignedQueryResponse + 18, // 10: gossip.v1.Heartbeat.networks:type_name -> gossip.v1.Heartbeat.Network + 19, // 11: gossip.v1.ChainGovernorConfig.chains:type_name -> gossip.v1.ChainGovernorConfig.Chain + 20, // 12: gossip.v1.ChainGovernorConfig.tokens:type_name -> gossip.v1.ChainGovernorConfig.Token + 23, // 13: gossip.v1.ChainGovernorStatus.chains:type_name -> gossip.v1.ChainGovernorStatus.Chain + 15, // 14: gossip.v1.QueryRequest.per_chain_queries:type_name -> gossip.v1.PerChainQueryRequest + 16, // 15: gossip.v1.PerChainQueryRequest.eth_call_query_request:type_name -> gossip.v1.EthCallQueryRequest + 24, // 16: gossip.v1.EthCallQueryRequest.call_data:type_name -> gossip.v1.EthCallQueryRequest.EthCallData + 21, // 17: gossip.v1.ChainGovernorStatus.Emitter.enqueued_vaas:type_name -> gossip.v1.ChainGovernorStatus.EnqueuedVAA + 22, // 18: gossip.v1.ChainGovernorStatus.Chain.emitters:type_name -> gossip.v1.ChainGovernorStatus.Emitter + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_gossip_v1_gossip_proto_init() } @@ -2329,7 +2439,7 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EthCallQueryRequest); i { + switch v := v.(*PerChainQueryRequest); i { case 0: return &v.state case 1: @@ -2341,7 +2451,7 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignedQueryResponse); i { + switch v := v.(*EthCallQueryRequest); i { case 0: return &v.state case 1: @@ -2353,7 +2463,7 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Heartbeat_Network); i { + switch v := v.(*SignedQueryResponse); i { case 0: return &v.state case 1: @@ -2365,7 +2475,7 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChainGovernorConfig_Chain); i { + switch v := v.(*Heartbeat_Network); i { case 0: return &v.state case 1: @@ -2377,7 +2487,7 @@ func file_gossip_v1_gossip_proto_init() { } } 
file_gossip_v1_gossip_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChainGovernorConfig_Token); i { + switch v := v.(*ChainGovernorConfig_Chain); i { case 0: return &v.state case 1: @@ -2389,7 +2499,7 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChainGovernorStatus_EnqueuedVAA); i { + switch v := v.(*ChainGovernorConfig_Token); i { case 0: return &v.state case 1: @@ -2401,7 +2511,7 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChainGovernorStatus_Emitter); i { + switch v := v.(*ChainGovernorStatus_EnqueuedVAA); i { case 0: return &v.state case 1: @@ -2413,6 +2523,18 @@ func file_gossip_v1_gossip_proto_init() { } } file_gossip_v1_gossip_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChainGovernorStatus_Emitter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gossip_v1_gossip_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ChainGovernorStatus_Chain); i { case 0: return &v.state @@ -2424,6 +2546,18 @@ func file_gossip_v1_gossip_proto_init() { return nil } } + file_gossip_v1_gossip_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EthCallQueryRequest_EthCallData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_gossip_v1_gossip_proto_msgTypes[0].OneofWrappers = []interface{}{ (*GossipMessage_SignedObservation)(nil), @@ -2437,8 +2571,8 @@ func file_gossip_v1_gossip_proto_init() { (*GossipMessage_SignedQueryRequest)(nil), (*GossipMessage_SignedQueryResponse)(nil), } - file_gossip_v1_gossip_proto_msgTypes[14].OneofWrappers = []interface{}{ - (*QueryRequest_EthCallQueryRequest)(nil), + file_gossip_v1_gossip_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*PerChainQueryRequest_EthCallQueryRequest)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -2446,7 +2580,7 @@ func file_gossip_v1_gossip_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_gossip_v1_gossip_proto_rawDesc, NumEnums: 0, - NumMessages: 23, + NumMessages: 25, NumExtensions: 0, NumServices: 0, }, diff --git a/node/pkg/watchers/evm/watcher.go b/node/pkg/watchers/evm/watcher.go index 4cdc3be40e..6b81481df3 100644 --- a/node/pkg/watchers/evm/watcher.go +++ b/node/pkg/watchers/evm/watcher.go @@ -96,10 +96,10 @@ type ( // Incoming query requests from the network. Pre-filtered to only // include requests for our chainID. 
- queryReqC <-chan *common.QueryRequest + queryReqC <-chan *common.PerChainQueryInternal // Outbound query responses to query requests - queryResponseC chan<- *common.QueryResponse + queryResponseC chan<- *common.PerChainQueryResponseInternal pending map[pendingKey]*pendingMessage pendingMu sync.Mutex @@ -150,8 +150,8 @@ func NewEthWatcher( msgC chan<- *common.MessagePublication, setC chan<- *common.GuardianSet, obsvReqC <-chan *gossipv1.ObservationRequest, - queryReqC <-chan *common.QueryRequest, - queryResponseC chan<- *common.QueryResponse, + queryReqC <-chan *common.PerChainQueryInternal, + queryResponseC chan<- *common.PerChainQueryResponseInternal, unsafeDevMode bool, ) *Watcher { @@ -536,24 +536,18 @@ func (w *Watcher) Run(parentCtx context.Context) error { } switch req := queryRequest.Request.Message.(type) { - case *gossipv1.QueryRequest_EthCallQueryRequest: - to := eth_common.BytesToAddress(req.EthCallQueryRequest.To) - data := eth_hexutil.Encode(req.EthCallQueryRequest.Data) + case *gossipv1.PerChainQueryRequest_EthCallQueryRequest: block := req.EthCallQueryRequest.Block logger.Info("received query request", zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), zap.String("block", block), + zap.Int("numRequests", len(req.EthCallQueryRequest.CallData)), zap.String("component", "ccqevm"), ) timeout, cancel := context.WithTimeout(ctx, 5*time.Second) // like https://github.com/ethereum/go-ethereum/blob/master/ethclient/ethclient.go#L610 - callTransactionArg := map[string]interface{}{ - "to": to, - "data": data, - } + var blockMethod string var callBlockArg interface{} // TODO: try making these error and see what happens @@ -579,38 +573,70 @@ func (w *Watcher) Run(parentCtx context.Context) error { blockMethod = "eth_getBlockByNumber" callBlockArg = block } - var blockResult connectors.BlockMarshaller - var blockError error - var callResult eth_hexutil.Bytes - var callErr error - err := w.ethConn.RawBatchCallContext(timeout, []rpc.BatchElem{ - { - Method: blockMethod, - Args: []interface{}{ - block, - false, // no full transaction details + + // EvmCallData contains the details of a single query in the batch. + type EvmCallData struct { + to eth_common.Address + data string + callTransactionArg map[string]interface{} + callResult *eth_hexutil.Bytes + callErr error + } + + // We build two slices. The first is the batch submitted to the RPC call. It contains one entry for each query plus one to query the block. + // The second is the data associated with each request (but not the block request). The index into both is the index into the request call data. + batch := []rpc.BatchElem{} + evmCallData := []EvmCallData{} + + // Add each requested query to the batch. + for _, callData := range req.EthCallQueryRequest.CallData { + // like https://github.com/ethereum/go-ethereum/blob/master/ethclient/ethclient.go#L610 + to := eth_common.BytesToAddress(callData.To) + data := eth_hexutil.Encode(callData.Data) + ecd := EvmCallData{ + to: to, + data: data, + callTransactionArg: map[string]interface{}{ + "to": to, + "data": data, }, - Result: &blockResult, - Error: blockError, - }, - { + callResult: ð_hexutil.Bytes{}, + } + evmCallData = append(evmCallData, ecd) + + batch = append(batch, rpc.BatchElem{ Method: "eth_call", Args: []interface{}{ - callTransactionArg, + ecd.callTransactionArg, callBlockArg, }, - Result: &callResult, - Error: callErr, + Result: ecd.callResult, + Error: ecd.callErr, + }) + } + + // Add the block query to the batch. 
+ var blockResult connectors.BlockMarshaller + var blockError error + batch = append(batch, rpc.BatchElem{ + Method: blockMethod, + Args: []interface{}{ + block, + false, // no full transaction details }, + Result: &blockResult, + Error: blockError, }) + + // Query the RPC. + err := w.ethConn.RawBatchCallContext(timeout, batch) cancel() if err != nil { logger.Error("failed to process query request", zap.Error(err), zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), zap.String("block", block), + zap.Any("batch", batch), zap.String("component", "ccqevm"), ) w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) @@ -620,9 +646,8 @@ func (w *Watcher) Run(parentCtx context.Context) error { if blockError != nil { logger.Error("failed to process query block request", zap.Error(blockError), zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), zap.String("block", block), + zap.Any("batch", batch), zap.String("component", "ccqevm"), ) w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) @@ -632,61 +657,70 @@ func (w *Watcher) Run(parentCtx context.Context) error { if blockResult.Number == nil { logger.Error("invalid query block result", zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), zap.String("block", block), + zap.Any("batch", batch), zap.String("component", "ccqevm"), ) w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) continue } - if callErr != nil { - logger.Error("failed to process query call request", - zap.Error(callErr), zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), - zap.String("block", block), - zap.String("component", "ccqevm"), - ) - w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) - continue - } + resp := []common.EthCallQueryResponse{} + + errFound := false + for idx := range req.EthCallQueryRequest.CallData { + if evmCallData[idx].callErr != nil { + logger.Error("failed to process query call request", + zap.Error(evmCallData[idx].callErr), zap.String("eth_network", w.networkName), + zap.String("block", block), + zap.Int("errorIdx", idx), + zap.Any("batch", batch), + zap.String("component", "ccqevm"), + ) + w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) + errFound = true + break + } + + // Nil or Empty results are not valid + // eth_call will return empty when the state doesn't exist for a block + if len(*evmCallData[idx].callResult) == 0 { + logger.Error("invalid call result", + zap.String("eth_network", w.networkName), + zap.String("block", block), + zap.Int("errorIdx", idx), + zap.Any("batch", batch), + zap.String("component", "ccqevm"), + ) + w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) + errFound = true + break + } - // Nil or Empty results are not valid - // eth_call will return empty when the state doesn't exist for a block - if len(callResult) == 0 { - logger.Error("invalid call result", + logger.Info("query result", zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), zap.String("block", block), + zap.String("blockNumber", blockResult.Number.String()), + zap.String("blockHash", blockResult.Hash.Hex()), + zap.String("blockTime", blockResult.Time.String()), + zap.Int("idx", idx), + zap.String("to", evmCallData[idx].to.Hex()), + zap.Any("data", evmCallData[idx].data), + zap.String("result", 
evmCallData[idx].callResult.String()), zap.String("component", "ccqevm"), ) - w.ccqSendQueryResponse(logger, queryRequest, common.QueryRetryNeeded, nil) - continue - } - logger.Info("query result", - zap.String("eth_network", w.networkName), - zap.String("to", to.Hex()), - zap.Any("data", data), - zap.String("block", block), - zap.String("blockNumber", blockResult.Number.String()), - zap.String("blockHash", blockResult.Hash.Hex()), - zap.String("blockTime", blockResult.Time.String()), - zap.String("result", callResult.String()), - zap.String("component", "ccqevm"), - ) - - resp := &common.EthCallQueryResponse{ - Number: blockResult.Number.ToInt(), - Hash: blockResult.Hash, - Time: time.Unix(int64(blockResult.Time), 0), - Result: callResult, + resp = append(resp, common.EthCallQueryResponse{ + Number: blockResult.Number.ToInt(), + Hash: blockResult.Hash, + Time: time.Unix(int64(blockResult.Time), 0), + Result: *evmCallData[idx].callResult, + }) } - w.ccqSendQueryResponse(logger, queryRequest, common.QuerySuccess, resp) + if !errFound { + w.ccqSendQueryResponse(logger, queryRequest, common.QuerySuccess, resp) + } default: logger.Warn("received unsupported request type", @@ -1116,8 +1150,8 @@ func (w *Watcher) SetMaxWaitConfirmations(maxWaitConfirmations uint64) { } // ccqSendQueryResponse sends an error response back to the query handler. -func (w *Watcher) ccqSendQueryResponse(logger *zap.Logger, req *common.QueryRequest, status common.QueryStatus, result *common.EthCallQueryResponse) { - queryResponse := common.CreateQueryResponse(req, status, result) +func (w *Watcher) ccqSendQueryResponse(logger *zap.Logger, req *common.PerChainQueryInternal, status common.QueryStatus, results []common.EthCallQueryResponse) { + queryResponse := common.CreatePerChainQueryResponseInternal(req.RequestID, req.RequestIdx, req.ChainID, status, results) select { case w.queryResponseC <- queryResponse: logger.Debug("published query response error to handler", zap.String("component", "ccqevm")) diff --git a/proto/gossip/v1/gossip.proto b/proto/gossip/v1/gossip.proto index b68135cd8e..aae54677ce 100644 --- a/proto/gossip/v1/gossip.proto +++ b/proto/gossip/v1/gossip.proto @@ -243,17 +243,25 @@ message SignedQueryRequest { } message QueryRequest { + uint32 nonce = 1; + repeated PerChainQueryRequest per_chain_queries = 2; +} + +message PerChainQueryRequest { uint32 chain_id = 1; - uint32 nonce = 2; oneof message { EthCallQueryRequest eth_call_query_request = 3; } } message EthCallQueryRequest { - bytes to = 1; - bytes data = 2; - string block = 3; + string block = 1; + repeated EthCallData call_data = 2; + + message EthCallData { + bytes to = 1; + bytes data = 2; + } } message SignedQueryResponse {
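
As an aside to the gossip.proto change above (this is an illustrative sketch, not part of the patch): the reworked QueryRequest now carries a top-level nonce plus a repeated list of per-chain queries, and each EthCallQueryRequest batches several call_data entries against a single block. The sketch below shows how a client might populate the new shape. Type and field names assume the standard protoc-gen-go output for the messages defined in this patch; the chain ID, block tag, contract address, and call selectors are placeholders.

package main

import (
	"encoding/hex"
	"fmt"

	gossipv1 "github.com/certusone/wormhole/node/pkg/proto/gossip/v1"
)

func main() {
	// Placeholder values for illustration only: a dummy 20-byte contract
	// address and two ABI-encoded call payloads (4-byte selectors).
	to, _ := hex.DecodeString("0102030405060708090a0b0c0d0e0f1011121314")
	nameCall, _ := hex.DecodeString("06fdde03")        // name()
	totalSupplyCall, _ := hex.DecodeString("18160ddd") // totalSupply()

	// One request, one per-chain query, two eth_call payloads against the
	// same block.
	req := &gossipv1.QueryRequest{
		Nonce: 1,
		PerChainQueries: []*gossipv1.PerChainQueryRequest{
			{
				ChainId: 2, // hypothetical: Wormhole chain ID for Ethereum
				Message: &gossipv1.PerChainQueryRequest_EthCallQueryRequest{
					EthCallQueryRequest: &gossipv1.EthCallQueryRequest{
						Block: "0x28d9630", // placeholder block tag
						CallData: []*gossipv1.EthCallQueryRequest_EthCallData{
							{To: to, Data: nameCall},
							{To: to, Data: totalSupplyCall},
						},
					},
				},
			},
		},
	}

	fmt.Printf("per-chain queries: %d, calls in first query: %d\n",
		len(req.PerChainQueries),
		len(req.PerChainQueries[0].GetEthCallQueryRequest().GetCallData()))
}

On the watcher side, as shown in the diff, each call_data entry becomes one eth_call element in a single rpc.BatchElem batch, with one extra element for the block lookup, so the whole per-chain query resolves against a consistent block.
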