diff --git a/main.go b/main.go
index e266df21..71072726 100644
--- a/main.go
+++ b/main.go
@@ -8,7 +8,6 @@ import (
 
 func main() {
 	run.InvokeMap(map[string]interface{}{
-		"virtual-payment":           run.InitializedTestCaseFn(tests.CreateVirtualPaymentTest),
-		"multi-hop-virtual-payment": run.InitializedTestCaseFn(tests.CreateMultiHopVirtualPaymentTest),
+		"virtual-payment": run.InitializedTestCaseFn(tests.CreateVirtualPaymentTest),
 	})
 }
diff --git a/manifest.toml b/manifest.toml
index 423e2b9d..b019e786 100644
--- a/manifest.toml
+++ b/manifest.toml
@@ -33,24 +33,8 @@ isNightly = {type = "bool", default = false, desc = "Whether this test is being
 networkJitter = {type = "int", unit = "milliseconds", default = 0}
 networkLatency = {type = "int", unit = "milliseconds", default = 0}
 numOfHubs = {type = "int", default = 1, desc = "The number of instances that should play the role of the hub"}
+numOfIntermediaries = {type = "int", default = 1, desc = "The number of intermediaries(hops) to use in the virtual payment channel"}
 numOfPayeePayers = {type = "int", default = 0, desc = "The number of instances that should play the role of the payeepayer"}
 numOfPayees = {type = "int", default = 1, desc = "The number of instances that should play the role of the payee"}
 numOfPayers = {type = "int", default = 1, desc = "The number of instances that should play the role of the payer"}
 paymentTestDuration = {type = "int", default = 10, unit = "seconds"}
-
-[[testcases]]
-instances = {min = 2, max = 100, default = 5}
-name = "multi-hop-virtual-payment"
-
-[testcases.params]
-concurrentPaymentJobs = {type = "int", desc = "The number of concurrent payment jobs a peer should attempt to maintain", default = 1}
-isCI = {type = "bool", default = false, desc = "Whether this test is being run as from CI"}
-isNightly = {type = "bool", default = false, desc = "Whether this test is being run as part of the nightly test suite"}
-networkJitter = {type = "int", unit = "milliseconds", default = 0}
-networkLatency = {type = "int", unit = "milliseconds", default = 0}
-numOfHubs = {type = "int", default = 3, desc = "The number of instances that should play the role of the hub"}
-numOfIntermediaries = {type = "int", default = 2, desc = "The number of intermediaries(hops) to use in the virtual payment channel"}
-numOfPayeePayers = {type = "int", default = 0, desc = "The number of instances that should play the role of the payeepayer"}
-numOfPayees = {type = "int", default = 4, desc = "The number of instances that should play the role of the payee"}
-numOfPayers = {type = "int", default = 1, desc = "The number of instances that should play the role of the payer"}
-paymentTestDuration = {type = "int", default = 10, unit = "seconds"}
diff --git a/tests/multi-hop-virtual-payment.go b/tests/multi-hop-virtual-payment.go
deleted file mode 100644
index 28ff40d7..00000000
--- a/tests/multi-hop-virtual-payment.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package tests
-
-import (
-	"context"
-	"fmt"
-	"math/big"
-	"math/rand"
-	"os"
-	"time"
-
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/statechannels/go-nitro-testground/chain"
-	c "github.com/statechannels/go-nitro-testground/config"
-	"github.com/statechannels/go-nitro-testground/peer"
-	"github.com/statechannels/go-nitro-testground/utils"
-	"github.com/statechannels/go-nitro/channel/state/outcome"
-	nitro "github.com/statechannels/go-nitro/client"
-	"github.com/statechannels/go-nitro/client/engine"
-	p2pms "github.com/statechannels/go-nitro/client/engine/messageservice/p2p-message-service"
-	"github.com/statechannels/go-nitro/client/engine/store"
-	"github.com/statechannels/go-nitro/protocols"
-	"github.com/statechannels/go-nitro/types"
-	"github.com/testground/sdk-go/run"
-	"github.com/testground/sdk-go/runtime"
-	"github.com/testground/sdk-go/sync"
-)
-
-func CreateMultiHopVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) error {
-	// The default frequency of diagnostics is 10 seconds.
-	// That's a bit too slow for most of our test runs.
-	runEnv.D().SetFrequency(1 * time.Second)
-	ctx := context.Background()
-
-	client := init.SyncClient
-	net := init.NetClient
-
-	networkJitterMS, networkLatencyMS := runEnv.IntParam("networkJitter"), runEnv.IntParam("networkLatency")
-	// instantiate a network client amd wait for it to be ready.
-	err := utils.ConfigureNetworkClient(ctx, net, client, runEnv, networkJitterMS, networkLatencyMS)
-	if err != nil {
-		panic(err)
-	}
-
-	seq := init.GlobalSeq
-	ip := net.MustGetDataNetworkIP()
-
-	runConfig, err := c.GetRunConfig(runEnv)
-	if err != nil {
-		panic(err)
-	}
-
-	role := peer.GetRole(seq, runConfig)
-	// We use the sequence in the random source so we generate a unique key even if another client is running at the same time
-	privateKey, err := crypto.GenerateKey()
-	if err != nil {
-		panic(err)
-	}
-
-	pk := crypto.FromECDSA(privateKey)
-	address := crypto.PubkeyToAddress(privateKey.PublicKey)
-	port := (START_PORT) + int(seq)
-	ipAddress := ip.String()
-
-	// Create the ms using the given key
-	ms := p2pms.NewMessageService(ipAddress, port, pk)
-	client.MustSignalAndWait(ctx, "msStarted", runEnv.TestInstanceCount)
-
-	mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role, Seq: seq}
-	me := peer.MyInfo{PeerInfo: mePeerInfo, PrivateKey: *privateKey}
-
-	runEnv.RecordMessage("I am address:%s role:%d seq:%d", me.Address, me.Role, me.Seq)
-
-	utils.RecordRunInfo(me, runConfig, runEnv.D())
-
-	// Broadcasts our info and get peer info from all other instances.
-	peers := utils.SharePeerInfo(me.PeerInfo, ctx, client, runEnv.TestInstanceCount)
-
-	// Register our peers with the message service
-	ms.AddPeers(peer.GetMessageServicePeers(peers))
-	client.MustSignalAndWait(ctx, "peersAdded", runEnv.TestInstanceCount)
-
-	store := store.NewMemStore(crypto.FromECDSA(&me.PrivateKey))
-
-	// We skip the 0x prefix by slicing from index 2
-	shortAddress := me.Address.String()[2:8]
-	logPath := fmt.Sprintf("./outputs/nitro-client-%s-role-%d.log", shortAddress, me.Role)
-	// The outputs folder will be copied when results are collected.
-	logDestination, _ := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666)
-
-	// All instances wait until the NitroAdjudicator has been deployed (seq = 1 instance is responsible)
-	cs := chain.NewChainService(ctx, seq, logDestination)
-	contractSetup := sync.State("contractSetup")
-	client.MustSignalEntry(ctx, contractSetup)
-	client.MustBarrier(ctx, contractSetup, runEnv.TestInstanceCount)
-
-	nClient := nitro.New(ms, cs, store, logDestination, &engine.PermissivePolicy{}, runEnv.D())
-
-	cm := utils.NewCompletionMonitor(&nClient, runEnv.RecordMessage)
-	defer cm.Close()
-
-	// We wait until everyone has chosen an address.
-	client.MustSignalAndWait(ctx, "client created", runEnv.TestInstanceCount)
-
-	client.MustSignalAndWait(ctx, "message service connected", runEnv.TestInstanceCount)
-
-	ledgerIds := utils.CreateLedgerChannels(nClient, cm, utils.FINNEY_IN_WEI, me.PeerInfo, peers)
-	if len(ledgerIds) > 0 {
-		runEnv.RecordMessage("%s: Created Ledgers %s", me.Address, utils.AbbreviateSlice(ledgerIds))
-	}
-
-	// Create ledger channels with all the hubs
-
-	client.MustSignalAndWait(ctx, sync.State("ledgerDone"), runEnv.TestInstanceCount)
-
-	if me.IsPayer() {
-
-		hubs := peer.FilterByRole(peers, peer.Hub)
-		payees := peer.FilterByRole(peers, peer.Payee)
-		payees = append(payees, peer.FilterByRole(peers, peer.PayerPayee)...)
-
-		createVirtualPaymentsJob := func() {
-			numHops := runEnv.IntParam("numOfIntermediaries")
-
-			selectedHubs := utils.SelectRandomHubs(hubs, numHops)
-			runEnv.RecordMessage("%s: Selected hubs %s", me.Address, utils.AbbreviateSlice(selectedHubs))
-			randomPayee := utils.SelectRandom(payees)
-
-			var channelId types.Destination
-			runEnv.D().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Time(func() {
-
-				outcome := outcome.Exit{outcome.SingleAssetExit{
-					Allocations: outcome.Allocations{
-						outcome.Allocation{
-							Destination: types.AddressToDestination(me.Address),
-							Amount:      big.NewInt(int64(10 * utils.GWEI_IN_WEI)),
-						},
-						outcome.Allocation{
-							Destination: types.AddressToDestination(randomPayee.Address),
-							Amount:      big.NewInt(0),
-						},
-					},
-				}}
-
-				r := nClient.CreateVirtualPaymentChannel(selectedHubs, randomPayee.Address, 0, outcome)
-
-				channelId = r.ChannelId
-				cm.WaitForObjectivesToComplete([]protocols.ObjectiveId{r.Id})
-
-				runEnv.RecordMessage("Opened virtual channel %s with %s using hubs %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address), utils.AbbreviateSlice(selectedHubs))
-
-				paymentAmount := big.NewInt(utils.KWEI_IN_WEI)
-				nClient.Pay(r.ChannelId, paymentAmount)
-				runEnv.RecordMessage("Sent payment of %d wei to %s using channel %s", paymentAmount.Int64(), utils.Abbreviate(randomPayee.Address), utils.Abbreviate(channelId))
-
-				// TODO: Should we wait for receipt of this payment before stopping the time_to_first_payment timer?
-			})
-
-			// Perform between 1 and 5 payments additional payments
-			amountOfPayments := 1 + rand.Intn(4)
-			for i := 0; i < amountOfPayments; i++ {
-				// pay between 1 and 2 kwei
-				paymentAmount := big.NewInt(utils.KWEI_IN_WEI + (rand.Int63n(utils.KWEI_IN_WEI)))
-				nClient.Pay(channelId, paymentAmount)
-
-				runEnv.RecordMessage("Sent payment of %d wei to %s using channel %s", paymentAmount.Int64(), utils.Abbreviate(randomPayee.Address), utils.Abbreviate(channelId))
-
-			}
-
-			// TODO: If we attempt to close a virtual channel too fast we can cause other clients to fail.
-			// See https://github.com/statechannels/go-nitro/issues/744
-			time.Sleep(time.Duration(250 * time.Millisecond))
-
-			// TODO: get payment balance and output it to the log
-			runEnv.RecordMessage("Closing %s with payment to %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address))
-			closeId := nClient.CloseVirtualChannel(channelId)
-			cm.WaitForObjectivesToComplete([]protocols.ObjectiveId{closeId})
-
-		}
-
-		// Run the job(s)
-		utils.RunJobs(createVirtualPaymentsJob, runConfig.PaymentTestDuration, int64(runConfig.ConcurrentPaymentJobs))
-	}
-	client.MustSignalAndWait(ctx, "paymentsDone", runEnv.TestInstanceCount)
-
-	if me.Role != peer.Hub {
-		// TODO: Closing a ledger channel too soon after closing a virtual channel seems to fail.
-		time.Sleep(time.Duration(250 * time.Millisecond))
-		// Close all the ledger channels with the hub
-		oIds := []protocols.ObjectiveId{}
-		for _, ledgerId := range ledgerIds {
-			runEnv.RecordMessage("Closing ledger %s", utils.Abbreviate(ledgerId))
-			oId := nClient.CloseLedgerChannel(ledgerId)
-			oIds = append(oIds, oId)
-		}
-		cm.WaitForObjectivesToComplete(oIds)
-		runEnv.RecordMessage("All ledger channels closed")
-	}
-
-	// Record the mean time to first payment to nightly/ci metrics if applicable
-	// This allows us to track performance over time
-	mean := runEnv.D().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Mean()
-	if runEnv.BooleanParam("isNightly") {
-		runEnv.R().RecordPoint(fmt.Sprintf("nightly_mean_time_to_first_payment,me=%s", me.Address), float64(mean))
-	}
-	if runEnv.BooleanParam("isCI") {
-		runEnv.R().RecordPoint(fmt.Sprintf("ci_mean_time_to_first_payment,me=%s", me.Address), float64(mean))
-	}
-
-	client.MustSignalAndWait(ctx, "done", runEnv.TestInstanceCount)
-
-	return nil
-
-}
diff --git a/tests/virtual-payment.go b/tests/virtual-payment.go
index 3b536f52..5dff7a62 100644
--- a/tests/virtual-payment.go
+++ b/tests/virtual-payment.go
@@ -29,7 +29,9 @@ import (
 const START_PORT = 49000
 
 func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) error {
-
+	// The default frequency of diagnostics is 10 seconds.
+	// That's a bit too slow for most of our test runs.
+	runEnv.D().SetFrequency(1 * time.Second)
 	ctx := context.Background()
 
 	client := init.SyncClient
@@ -69,9 +71,9 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
 	mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role, Seq: seq}
 	me := peer.MyInfo{PeerInfo: mePeerInfo, PrivateKey: *privateKey}
 
-	runEnv.RecordMessage("I am %+v", me)
+	runEnv.RecordMessage("I am address:%s role:%d seq:%d", me.Address, me.Role, me.Seq)
 
-	utils.RecordRunInfo(me, runConfig, runEnv.R())
+	utils.RecordRunInfo(me, runConfig, runEnv.D())
 
 	// Broadcasts our info and get peer info from all other instances.
 	peers := utils.SharePeerInfo(me.PeerInfo, ctx, client, runEnv.TestInstanceCount)
@@ -94,7 +96,7 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
 	client.MustSignalEntry(ctx, contractSetup)
 	client.MustBarrier(ctx, contractSetup, runEnv.TestInstanceCount)
 
-	nClient := nitro.New(ms, cs, store, logDestination, &engine.PermissivePolicy{}, runEnv.R())
+	nClient := nitro.New(ms, cs, store, logDestination, &engine.PermissivePolicy{}, runEnv.D())
 
 	cm := utils.NewCompletionMonitor(&nClient, runEnv.RecordMessage)
 	defer cm.Close()
@@ -104,14 +106,13 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
 
 	client.MustSignalAndWait(ctx, "message service connected", runEnv.TestInstanceCount)
 
-	ledgerIds := []types.Destination{}
-
-	if me.Role != peer.Hub {
-		// Create ledger channels with all the hubs
-		ledgerIds = utils.CreateLedgerChannels(nClient, cm, utils.FINNEY_IN_WEI, me.PeerInfo, peers)
-
+	ledgerIds := utils.CreateLedgerChannels(nClient, cm, utils.FINNEY_IN_WEI, me.PeerInfo, peers)
+	if len(ledgerIds) > 0 {
+		runEnv.RecordMessage("%s: Created Ledgers %s", me.Address, utils.AbbreviateSlice(ledgerIds))
 	}
 
+	// Create ledger channels with all the hubs
+
 	client.MustSignalAndWait(ctx, sync.State("ledgerDone"), runEnv.TestInstanceCount)
 
 	if me.IsPayer() {
@@ -121,11 +122,14 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
 		payees = append(payees, peer.FilterByRole(peers, peer.PayerPayee)...)
 
 		createVirtualPaymentsJob := func() {
-			randomHub := utils.SelectRandom(hubs)
+			numHops := runEnv.IntParam("numOfIntermediaries")
+
+			selectedHubs := utils.SelectRandomHubs(hubs, numHops)
+			runEnv.RecordMessage("%s: Selected hubs %s", me.Address, utils.AbbreviateSlice(selectedHubs))
 			randomPayee := utils.SelectRandom(payees)
 
 			var channelId types.Destination
-			runEnv.R().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Time(func() {
+			runEnv.D().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Time(func() {
 
 				outcome := outcome.Exit{outcome.SingleAssetExit{
 					Allocations: outcome.Allocations{
@@ -140,12 +144,12 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
 					},
 				}}
 
-				r := nClient.CreateVirtualPaymentChannel([]types.Address{randomHub.Address}, randomPayee.Address, 0, outcome)
+				r := nClient.CreateVirtualPaymentChannel(selectedHubs, randomPayee.Address, 0, outcome)
 
 				channelId = r.ChannelId
 				cm.WaitForObjectivesToComplete([]protocols.ObjectiveId{r.Id})
 
-				runEnv.RecordMessage("Opened virtual channel %s with %s using hub %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address), utils.Abbreviate(randomHub.Address))
+				runEnv.RecordMessage("Opened virtual channel %s with %s using hubs %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address), utils.AbbreviateSlice(selectedHubs))
 
 				paymentAmount := big.NewInt(utils.KWEI_IN_WEI)
 				nClient.Pay(r.ChannelId, paymentAmount)
@@ -197,7 +201,7 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
 
 	// Record the mean time to first payment to nightly/ci metrics if applicable
 	// This allows us to track performance over time
-	mean := runEnv.R().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Mean()
+	mean := runEnv.D().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Mean()
 	if runEnv.BooleanParam("isNightly") {
 		runEnv.R().RecordPoint(fmt.Sprintf("nightly_mean_time_to_first_payment,me=%s", me.Address), float64(mean))
 	}