From 64cd9ab7c67c945d755fb4fbd5afb2d352874eea Mon Sep 17 00:00:00 2001 From: sanaz Date: Mon, 25 Sep 2023 14:37:10 -0600 Subject: [PATCH 01/20] fixes mismatch bw the function name and the go doc --- mempool/v1/reactor.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go index 1bbe541b2d..b04de67d37 100644 --- a/mempool/v1/reactor.go +++ b/mempool/v1/reactor.go @@ -37,7 +37,7 @@ type mempoolIDs struct { activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter } -// Reserve searches for the next unused ID and assigns it to the +// ReserveForPeer searches for the next unused ID and assigns it to the // peer. func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) { ids.mtx.Lock() @@ -174,7 +174,7 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // broadcast routine checks if peer is gone and returns } -// Receive implements Reactor. +// ReceiveEnvelope implements Reactor. // It adds any received transactions to the mempool. func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) @@ -191,7 +191,7 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { } protoTxs := msg.GetTxs() if len(protoTxs) == 0 { - memR.Logger.Error("received tmpty txs from peer", "src", e.Src) + memR.Logger.Error("received empty txs from peer", "src", e.Src) return } txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)} @@ -240,7 +240,7 @@ type PeerState interface { GetHeight() int64 } -// Send new mempool txs to peer. +// broadcastTxRoutine sends new mempool txs to peer. 
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { peerID := memR.ids.GetForPeer(peer) var next *clist.CElement From ed9eb2e7ca64435760be6da99178fcf11c3114cd Mon Sep 17 00:00:00 2001 From: sanaz Date: Wed, 27 Sep 2023 15:27:28 -0600 Subject: [PATCH 02/20] adds the draft of the mempool tx flow --- blockchain/v1/README.md | 121 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 blockchain/v1/README.md diff --git a/blockchain/v1/README.md b/blockchain/v1/README.md new file mode 100644 index 0000000000..8b7e851de1 --- /dev/null +++ b/blockchain/v1/README.md @@ -0,0 +1,121 @@ +TX flow in the mempool + +A node can receive a transaction in two ways: +Either by a user sending a transaction to the node, or by the node receiving a transaction from another node. + +The node has a mempool, in which it stores all the transactions it has received. +The only reactor at the p2p level that gets involved in sending and +receiving transactions is the mempool reactor. +This reactor is installed on a specific channel ID, referred to by +`MempoolChannel` with the byte value of `0x30`. +It is worth noting that there is a 1-1 mapping between reactors and their +mounted channels. This means that the mempool reactor is the only reactor +that is mounted on channel ID `0x30`. + + +The total number of connected peers are limited by `max_num_inbound_peers` +or `max_num_outbound_peers` in the config.toml file. According to the +current default configs, the former is set to `40` and the latter is set to +`10` (excluding persistent peers). + +[//]: # (We may want to extract the max number of persistent peers as well) + + +The first step in the transaction life-cycle is the validity check. +If all checks pass successfully, then the message goes throughout the following +steps. The transaction is broadcast to all the connected and running peers +who support the +mempool channel ID. 
The broadcast happens regardless of the connection type +of the peer, being inbound or outbound. +When a transaction is sent to a peer, the mempool marks that peer to not to +send that transaction again. + +If the transaction is received from another peer, then the validity check +takes place. Also, the peer is marked to not to send that transaction again. + +If a peer is one height behind the height at which the transaction is +received, then the tx is sent to that peer. Otherwise, the mempool waits +until the peer catches up. + + +When a block is committed, the block transactions are removed from the +mempool and remaining transactions are re-checked for their TTL +and are +removed from the mempool if they have been expired. ([ref](https://github. +com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776 +/state/execution.go#L324)). Additionally, they are checked for their +validity given the updated state. Any invalid transaction is removed. + +[//]: # (Is it possible that the marks are erased and the transaction is +sent again? for example when the mempool is full and then gets erased) +[//]: # (Is a transaction resent after Recheck: NO) + +The mempool reactor operates independent of the other reactors. +So, the consensus round, block height, does not affect its logic. +OR it does, when it rechecks all the transactions after every block is +committed and state has changed. + +[?] Is there any cap on the number of transactions sent to another peer? No + +The max size of a transaction: `MaxTxBytes: 1024 * 1024, // 1MB` +The mempool can accomodate up to `Size` `5000` many transactions with total +size of `MaxTxBytes=1GB`. +Each transaction can stay in the mempool for `TTLDuration` and `TTLNumBlocks` +after its insertion to the mempool. + +[//]: # (Consider the case that one sends 1000 txs with size 1MB, filling up +the entire mempool size wise, in that case, we keep erasing the past +transactions. is it even possible? 
do we have a limit on the incoming +bandwidth? consider a mempool size of 1, and then make an example) + +At any connection, there can be at most 2 instances of the same transaction. + +Each p2p connection is also subject to the following limits per channel: +```markdown + +SendRate: defaultSendRate,= int64(512000) // 500KB/s [?] How it +is enforced? +RecvRate: defaultRecvRate, = int64(512000) // 500KB/ +MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize, // 1024 +FlushThrottle: defaultFlushThrottle, +PingInterval: defaultPingInterval, +PongTimeout: defaultPongTimeout, + +``` +ref https://github.com/celestiaorg/celestia-core/blob +/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177 +(rate in this function is the send or rec rate) + + +## Traffic Rate Analysis +**test configuration:** +`d`: Node degree (total incoming and outgoing connections) +transaction rate: `TNR` total number of transactions per second submitted to the +network +transaction rate: `TSR` total number of transactions per second submitted to the +network +`C`: total number of connections in the network + +incoming traffic rate `itr` +outgoing traffic rate `otr` +In the worst case scenario: a transaction is exchanged by the two ends of +connection simultaneously, contributing to both incoming and outgoing traffic. +In a network, with transaction rate `T` and a node with `d` degree, the +`itr` and `otr` are calculated as follows: +`itr = d * T` +`otr = d * T` +unique-transaction-rate `UTR` is the number of unique transactions per +second which should be `TNR`. 
+ +Desired network transaction throughput `network_tx_throughput` in bytes/sec +is capped by the `block_size` and `block_time` as follows: +`max_network_tx_throughput = block_size / block_time` + +For a node, in order to be able to handle this throughput in the worst case, +the node will undertake the following traffic: +`itr = d * max_network_tx_throughput` +`otr = d * max_network_tx_throughput` + +So the total bandwidth requirement for a node, just due to the mempool +operation is, `d * max_network_tx_throughput` for download and upload. + From 993c1228977f206c80cb0f87ac1d4f002826e904 Mon Sep 17 00:00:00 2001 From: sanaz Date: Wed, 27 Sep 2023 15:27:50 -0600 Subject: [PATCH 03/20] fixes godoc --- libs/clist/clist.go | 2 +- mempool/v1/mempool.go | 32 ++++++++++++++++---------------- p2p/conn/connection.go | 2 +- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/libs/clist/clist.go b/libs/clist/clist.go index 8f27743e80..8d27402834 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -312,7 +312,7 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } -// Panics if list grows beyond its max length. +// PushBack panics if list grows beyond its max length. func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index fa372d53ae..7df8f081d6 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -136,8 +136,8 @@ func (txmp *TxMempool) FlushAppConn() error { // We could just not require the caller to hold the lock at all, but the // semantics of the Mempool interface require the caller to hold it, and we // can't change that without disrupting existing use. - txmp.mtx.Unlock() - defer txmp.mtx.Lock() + txmp.Unlock() + defer txmp.Lock() return txmp.proxyAppConn.FlushSync() } @@ -145,8 +145,8 @@ func (txmp *TxMempool) FlushAppConn() error { // EnableTxsAvailable enables the mempool to trigger events when transactions // are available on a block by block basis. 
func (txmp *TxMempool) EnableTxsAvailable() { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() txmp.txsAvailable = make(chan struct{}, 1) } @@ -244,8 +244,8 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp // mempool. It reports an error if no such transaction exists. This operation // does not remove the transaction from the cache. func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() return txmp.removeTxByKey(txKey) } @@ -280,8 +280,8 @@ func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { // Flush purges the contents of the mempool and the cache, leaving both empty. // The current height is not modified by this operation. func (txmp *TxMempool) Flush() { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() // Remove all the transactions in the list explicitly, so that the sizes // and indexes get updated properly. @@ -449,8 +449,8 @@ func (txmp *TxMempool) Update( // // Finally, the new transaction is added and size stats updated. func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.ResponseCheckTx) { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() var err error if txmp.postCheck != nil { @@ -613,8 +613,8 @@ func (txmp *TxMempool) insertTx(wtx *WrappedTx) { // that case is handled by addNewTransaction instead. func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.ResponseCheckTx) { txmp.metrics.RecheckTimes.Add(1) - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() // Find the transaction reported by the ABCI callback. It is possible the // transaction was evicted during the recheck, in which case the transaction @@ -699,8 +699,8 @@ func (txmp *TxMempool) recheckTransactions() { // When recheck is complete, trigger a notification for more transactions. 
_ = g.Wait() - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() txmp.notifyTxsAvailable() }() } @@ -728,8 +728,8 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { // the txpool looped through all transactions and if so, performs a purge of any transaction // that has expired according to the TTLDuration. This is thread safe. func (txmp *TxMempool) CheckToPurgeExpiredTxs() { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() + txmp.Lock() + defer txmp.Unlock() if txmp.config.TTLDuration > 0 && time.Since(txmp.lastPurgeTime) > txmp.config.TTLDuration { txmp.purgeExpiredTxs(txmp.height) } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 87ab2ed28a..279ae23026 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -347,7 +347,7 @@ func (c *MConnection) stopForError(r interface{}) { } } -// Queues a message to be sent to channel. +// Send queues a message to be sent to channel. func (c *MConnection) Send(chID byte, msgBytes []byte) bool { if !c.IsRunning() { return false From fc86d4b78480d735e3472259d310cec4b1bd73f7 Mon Sep 17 00:00:00 2001 From: sanaz Date: Wed, 27 Sep 2023 17:07:21 -0600 Subject: [PATCH 04/20] adds mode on the relation bw individual nodes traffic rate and the network throughput --- blockchain/v1/README.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/blockchain/v1/README.md b/blockchain/v1/README.md index 8b7e851de1..3bbe273aa9 100644 --- a/blockchain/v1/README.md +++ b/blockchain/v1/README.md @@ -96,14 +96,19 @@ transaction rate: `TSR` total number of transactions per second submitted to the network `C`: total number of connections in the network +We assume all the transactions comply with the trnasaction size limit as +specified in the mempool config. +We assume all the transactions are valid and are accepted by the mempool. 
+ incoming traffic rate `itr` outgoing traffic rate `otr` In the worst case scenario: a transaction is exchanged by the two ends of connection simultaneously, contributing to both incoming and outgoing traffic. In a network, with transaction rate `T` and a node with `d` degree, the `itr` and `otr` are calculated as follows: -`itr = d * T` -`otr = d * T` +`itr = min(bw_req / d, d * T, channel_recv_rate)` +`otr = min(bw_req / d, d * T, channel_recv_rate)` + unique-transaction-rate `UTR` is the number of unique transactions per second which should be `TNR`. @@ -116,6 +121,8 @@ the node will undertake the following traffic: `itr = d * max_network_tx_throughput` `otr = d * max_network_tx_throughput` -So the total bandwidth requirement for a node, just due to the mempool +So the minimum bandwidth requirement for a node, just due to the mempool operation is, `d * max_network_tx_throughput` for download and upload. +If we set the bw requirement to `bw_req`, then the `nextwork_tx_throughput` +is at most `bw_req / d` bytes/sec. From fd9fc8ca9dd47ba880113ff3098d0d2f33d547ab Mon Sep 17 00:00:00 2001 From: sanaz Date: Thu, 28 Sep 2023 11:41:12 -0600 Subject: [PATCH 05/20] some reorganization of the content, putting it into itemized processes --- blockchain/v1/README.md | 86 ++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 44 deletions(-) diff --git a/blockchain/v1/README.md b/blockchain/v1/README.md index 3bbe273aa9..c0f495199c 100644 --- a/blockchain/v1/README.md +++ b/blockchain/v1/README.md @@ -1,27 +1,35 @@ -TX flow in the mempool +## Mempool Protocol Description -A node can receive a transaction in two ways: -Either by a user sending a transaction to the node, or by the node receiving a transaction from another node. - -The node has a mempool, in which it stores all the transactions it has received. -The only reactor at the p2p level that gets involved in sending and -receiving transactions is the mempool reactor. 
-This reactor is installed on a specific channel ID, referred to by -`MempoolChannel` with the byte value of `0x30`. -It is worth noting that there is a 1-1 mapping between reactors and their +The p2p latyer of Celestia-core which is a fork of CometBFT, is comprised of channels and reactors. Peers form a network around the channels they support. Message propagation in a channel is dictated by the reactor associated with that channel. In other words, a reactor holds the protocol logic for that channel. One of such channels is the mempool channel, which is reffered to by `MempoolChannel` and holds a specific channel ID of `0x30`. The mempool reactor is responsible for the propagation of transactions in the network. It is worth noting that there is a 1-1 mapping between reactors and their mounted channels. This means that the mempool reactor is the only reactor -that is mounted on channel ID `0x30`. - - -The total number of connected peers are limited by `max_num_inbound_peers` -or `max_num_outbound_peers` in the config.toml file. According to the -current default configs, the former is set to `40` and the latter is set to -`10` (excluding persistent peers). +that is mounted on the memppol channel ID `0x30`. In this document, we describe the mempool reactor and its protocol. We also provide an anlysis around the traffic rate of the mempool protocol. -[//]: # (We may want to extract the max number of persistent peers as well) - - -The first step in the transaction life-cycle is the validity check. +## Mempool Reactor +A node can receive a transaction in two ways: +Either by a user sending a transaction to the node, or by the node receiving a transaction from another node. +1. When a transaction is received: +2. its validity is checked, and if it is valid, it is added to the mempool. The transaction's height is set to the current block height. +3. 
If the transaction is received from another peer, the sending peer is + marked so that the transaction is not sent to that peer again. +2. The transactions is added to the mempool. At this point there will be two concurrent processes: + 3. **Mempool life-cycle**: + 3. Transactions remain the mempool until a block is committed, at which point: + 4. the block transactions are removed from the mempool + 5. remaining transactions are re-checked for their TTL + and are removed from the mempool if they have been expired. ([ref](https://github. + com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776 + /state/execution.go#L324)). + 6. remaining transactions are checked for their validity given the updated state. Any invalid transaction is removed. + 3. **Broadcast process**: For every peer and for every transaction in the mempool, the following takes place: + 1. The peer is sent a copy of the transaction if: + 3. peer is running + 4. supports the mempool channel ID. + 5. The max block height difference between the peer and the tx is one. + Otherwise, there will be a waiting time to let the peer catches up. + 1. Each transaction is sent only once and the receving peer is marked + as not to send that transaction again. + + -The mempool reactor operates independent of the other reactors. -So, the consensus round, block height, does not affect its logic. -OR it does, when it rechecks all the transactions after every block is -committed and state has changed. -[?] Is there any cap on the number of transactions sent to another peer? No +## Configurations +The total number of connected peers are limited by `max_num_inbound_peers` +or `max_num_outbound_peers` in the config.toml file. According to the +current default configs, the former is set to `40` and the latter is set to +`10` (excluding persistent peers). 
The max size of a transaction: `MaxTxBytes: 1024 * 1024, // 1MB` The mempool can accomodate up to `Size` `5000` many transactions with total @@ -63,13 +67,6 @@ size of `MaxTxBytes=1GB`. Each transaction can stay in the mempool for `TTLDuration` and `TTLNumBlocks` after its insertion to the mempool. -[//]: # (Consider the case that one sends 1000 txs with size 1MB, filling up -the entire mempool size wise, in that case, we keep erasing the past -transactions. is it even possible? do we have a limit on the incoming -bandwidth? consider a mempool size of 1, and then make an example) - -At any connection, there can be at most 2 instances of the same transaction. - Each p2p connection is also subject to the following limits per channel: ```markdown @@ -80,7 +77,6 @@ MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize, // 1024 FlushThrottle: defaultFlushThrottle, PingInterval: defaultPingInterval, PongTimeout: defaultPongTimeout, - ``` ref https://github.com/celestiaorg/celestia-core/blob /3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177 @@ -88,6 +84,8 @@ ref https://github.com/celestiaorg/celestia-core/blob ## Traffic Rate Analysis +At any connection, there can be at most 2 instances of the same transaction. 
+ **test configuration:** `d`: Node degree (total incoming and outgoing connections) transaction rate: `TNR` total number of transactions per second submitted to the From fd2c72a2c30e51b61f49d9473a9ab3f0d5433805 Mon Sep 17 00:00:00 2001 From: sanaz Date: Thu, 28 Sep 2023 12:37:22 -0600 Subject: [PATCH 06/20] a lot of revisions and content addition --- blockchain/v1/README.md | 138 ++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 82 deletions(-) diff --git a/blockchain/v1/README.md b/blockchain/v1/README.md index c0f495199c..ea93cbba2b 100644 --- a/blockchain/v1/README.md +++ b/blockchain/v1/README.md @@ -1,50 +1,32 @@ ## Mempool Protocol Description -The p2p latyer of Celestia-core which is a fork of CometBFT, is comprised of channels and reactors. Peers form a network around the channels they support. Message propagation in a channel is dictated by the reactor associated with that channel. In other words, a reactor holds the protocol logic for that channel. One of such channels is the mempool channel, which is reffered to by `MempoolChannel` and holds a specific channel ID of `0x30`. The mempool reactor is responsible for the propagation of transactions in the network. It is worth noting that there is a 1-1 mapping between reactors and their -mounted channels. This means that the mempool reactor is the only reactor -that is mounted on the memppol channel ID `0x30`. In this document, we describe the mempool reactor and its protocol. We also provide an anlysis around the traffic rate of the mempool protocol. +The Celestia-core's p2p layer, which is a fork of CometBFT, consists of channels and reactors. Peers establish connections within specific channels, effectively forming peer-to-peer groups (each channel represents such a group). The transmission of messages within a channel is governed by the associated reactor, essentially containing the protocol rules for that channel. 
+ +One notable channel is the mempool channel, identified as `MempoolChannel` with a specific channel ID of `0x30`. The mempool reactor manages the dissemination of transactions across the network. It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the mempool reactor and its protocol, including an analysis of the mempool protocol's traffic rate. ## Mempool Reactor -A node can receive a transaction in two ways: -Either by a user sending a transaction to the node, or by the node receiving a transaction from another node. -1. When a transaction is received: -2. its validity is checked, and if it is valid, it is added to the mempool. The transaction's height is set to the current block height. -3. If the transaction is received from another peer, the sending peer is - marked so that the transaction is not sent to that peer again. -2. The transactions is added to the mempool. At this point there will be two concurrent processes: - 3. **Mempool life-cycle**: - 3. Transactions remain the mempool until a block is committed, at which point: - 4. the block transactions are removed from the mempool - 5. remaining transactions are re-checked for their TTL - and are removed from the mempool if they have been expired. ([ref](https://github. - com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776 - /state/execution.go#L324)). - 6. remaining transactions are checked for their validity given the updated state. Any invalid transaction is removed. - 3. **Broadcast process**: For every peer and for every transaction in the mempool, the following takes place: - 1. The peer is sent a copy of the transaction if: - 3. peer is running - 4. supports the mempool channel ID. - 5. The max block height difference between the peer and the tx is one. 
- Otherwise, there will be a waiting time to let the peer catches up. - 1. Each transaction is sent only once and the receving peer is marked - as not to send that transaction again. - - -## Configurations -The total number of connected peers are limited by `max_num_inbound_peers` -or `max_num_outbound_peers` in the config.toml file. According to the -current default configs, the former is set to `40` and the latter is set to -`10` (excluding persistent peers). +## Constraints and Configurations +The relevant constraints and configurations for the mempool are as follows ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L758)): +- `Size`: This parameter specifies the total number of transactions that the mempool can hold, with a maximum value of `5000`. +- `MaxTxBytes`: The `MaxTxBytes` parameter defines the maximum size of the mempool in bytes, with a limit of `1GB`. +- `TTLDuration` and `TTLNumBlocks`: These settings determine the time and block height after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, thought it may be rewritten on the app side. + +- `MaxTxSize`: The `MaxTxSize` parameter specifies the maximum size of an individual transaction, which is set to `1MB`. -The max size of a transaction: `MaxTxBytes: 1024 * 1024, // 1MB` -The mempool can accomodate up to `Size` `5000` many transactions with total -size of `MaxTxBytes=1GB`. -Each transaction can stay in the mempool for `TTLDuration` and `TTLNumBlocks` -after its insertion to the mempool. 
+For each connection, the following limits apply per channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): -Each p2p connection is also subject to the following limits per channel: -```markdown +- `SendRate`: The `SendRate` parameter enforces a default sending rate of `500KB/s` (500 kilobytes per second). It ensures that data is sent at this maximum rate. +- `RecvRate`: The `RecvRate` parameter enforces a default receiving rate of `500KB/s` (500 kilobytes per second). It ensures that data is received at this maximum rate. +- `MaxPacketMsgPayloadSize`: The `MaxPacketMsgPayloadSize` parameter sets the maximum payload size for packet messages to `1024` bytes. -SendRate: defaultSendRate,= int64(512000) // 500KB/s [?] How it -is enforced? -RecvRate: defaultRecvRate, = int64(512000) // 500KB/ -MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize, // 1024 -FlushThrottle: defaultFlushThrottle, -PingInterval: defaultPingInterval, -PongTimeout: defaultPongTimeout, -``` -ref https://github.com/celestiaorg/celestia-core/blob -/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177 -(rate in this function is the send or rec rate) + +Peer related configs ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L524)) that would be relevant to the traffic analysis are as follows: +- `max_num_inbound_peers` and `max_num_outbound_peers`: These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers (excluding persistent peers). ## Traffic Rate Analysis -At any connection, there can be at most 2 instances of the same transaction. 
- -**test configuration:** -`d`: Node degree (total incoming and outgoing connections) -transaction rate: `TNR` total number of transactions per second submitted to the +In the analsysis provided below, we consider the knowledge of the following network parameters +- `d`: Node degree (total incoming and outgoing connections) +- transaction rate: `transaction_num_rate` total number of transactions per second submitted to the network -transaction rate: `TSR` total number of transactions per second submitted to the +- transaction rate: `transaction_size_rate` total size of transactions per second submitted to the network -`C`: total number of connections in the network +- `C`: total number of connections in the network -We assume all the transactions comply with the trnasaction size limit as +We additionally assume all the transactions comply with the trnasaction size limit as specified in the mempool config. We assume all the transactions are valid and are accepted by the mempool. +We also assume all the peers are up and running. -incoming traffic rate `itr` -outgoing traffic rate `otr` -In the worst case scenario: a transaction is exchanged by the two ends of +### Traffic Rate Analysis for a Node +We distinguish between the incoming and outgoing traffic rate, and denote each by `incoming_traffic_rate` and `outgoing_traffic_rate`, respectively. +Worst case scenario: a transaction is exchanged by the two ends of connection simultaneously, contributing to both incoming and outgoing traffic. 
-In a network, with transaction rate `T` and a node with `d` degree, the +In a network, with transaction rate `transaction_size_rate` and a node with `d` degree, the `itr` and `otr` are calculated as follows: -`itr = min(bw_req / d, d * T, channel_recv_rate)` -`otr = min(bw_req / d, d * T, channel_recv_rate)` +`incoming_traffic_rate = d * transaction_size_rate` +`outgoing_traffic_rate = d * transaction_size_rate` + +`incoming_traffic_rate = min(bw_req / d, d * T, channel_recv_rate)` +`outgoing_traffic_rate = min(bw_req / d, d * T, channel_send_rate)` -unique-transaction-rate `UTR` is the number of unique transactions per -second which should be `TNR`. +### Traffic Rate Analysis for the Network Desired network transaction throughput `network_tx_throughput` in bytes/sec is capped by the `block_size` and `block_time` as follows: `max_network_tx_throughput = block_size / block_time` From 4f5fd86cd8a2f16e73886a3b7a57685846ca754c Mon Sep 17 00:00:00 2001 From: sanaz Date: Thu, 28 Sep 2023 15:28:13 -0600 Subject: [PATCH 07/20] furthers the node traffic analysis --- blockchain/v1/README.md | 54 +++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/blockchain/v1/README.md b/blockchain/v1/README.md index ea93cbba2b..2e7ea8491d 100644 --- a/blockchain/v1/README.md +++ b/blockchain/v1/README.md @@ -40,15 +40,14 @@ bandwidth? consider a mempool size of 1, and then make an example) ---> ## Constraints and Configurations The relevant constraints and configurations for the mempool are as follows ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L758)): - `Size`: This parameter specifies the total number of transactions that the mempool can hold, with a maximum value of `5000`. -- `MaxTxBytes`: The `MaxTxBytes` parameter defines the maximum size of the mempool in bytes, with a limit of `1GB`. 
-- `TTLDuration` and `TTLNumBlocks`: These settings determine the time and block height after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, thought it may be rewritten on the app side. - +- `MaxTxBytes`: The `MaxTxBytes` parameter defines the maximum size of the mempool in bytes, with a limit of `1GB` by default consensus configs but is later modified on the celestia-app side to `128 * 128 * 482 = 7897088 = 7897.088 KB = 7.897 MB`. +- `TTLNumBlocks` and `TTLDuration` : These settings determine the number of blocks and time after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, however, on [celestia-app side](https://github.com/celestiaorg/celestia-app/blob/0d70807442ba0545058d353b44f6f9a583d3e11d/app/default_overrides.go#L209) these values are over-written to `5` and `5*15 s`, respectively. - `MaxTxSize`: The `MaxTxSize` parameter specifies the maximum size of an individual transaction, which is set to `1MB`. For each connection, the following limits apply per channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): -- `SendRate`: The `SendRate` parameter enforces a default sending rate of `500KB/s` (500 kilobytes per second). It ensures that data is sent at this maximum rate. -- `RecvRate`: The `RecvRate` parameter enforces a default receiving rate of `500KB/s` (500 kilobytes per second). It ensures that data is received at this maximum rate. +- `SendRate`: The `SendRate` parameter enforces a default sending rate of `500KB/s`. It ensures that data is sent at this maximum rate. +- `RecvRate`: The `RecvRate` parameter enforces a default receiving rate of `500KB/s`. It ensures that data is received at this maximum rate. 
- `MaxPacketMsgPayloadSize`: The `MaxPacketMsgPayloadSize` parameter sets the maximum payload size for packet messages to `1024` bytes. @@ -56,45 +55,32 @@ For each connection, the following limits apply per channel ID ([ref](https://gi Peer related configs ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L524)) that would be relevant to the traffic analysis are as follows: - `max_num_inbound_peers` and `max_num_outbound_peers`: These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers (excluding persistent peers). + + ## Traffic Rate Analysis -In the analsysis provided below, we consider the knowledge of the following network parameters +In the analysis provided below, we consider the knowledge of the following network parameters - `d`: Node degree (total incoming and outgoing connections) -- transaction rate: `transaction_num_rate` total number of transactions per second submitted to the -network -- transaction rate: `transaction_size_rate` total size of transactions per second submitted to the -network + +- `transaction_rate` which specifies that total size of transactions in bytes per second submitted to the network. - `C`: total number of connections in the network -We additionally assume all the transactions comply with the trnasaction size limit as -specified in the mempool config. -We assume all the transactions are valid and are accepted by the mempool. +Transactions are assumed to comply with the transaction size, are valid and are accepted by the mempool. We also assume all the peers are up and running. ### Traffic Rate Analysis for a Node -We distinguish between the incoming and outgoing traffic rate, and denote each by `incoming_traffic_rate` and `outgoing_traffic_rate`, respectively. 
+We distinguish between the incoming and outgoing traffic rate, and denote them by `incoming_traffic_rate` and `outgoing_traffic_rate`, respectively. Worst case scenario: a transaction is exchanged by the two ends of connection simultaneously, contributing to both incoming and outgoing traffic. -In a network, with transaction rate `transaction_size_rate` and a node with `d` degree, the -`itr` and `otr` are calculated as follows: -`incoming_traffic_rate = d * transaction_size_rate` -`outgoing_traffic_rate = d * transaction_size_rate` - -`incoming_traffic_rate = min(bw_req / d, d * T, channel_recv_rate)` -`outgoing_traffic_rate = min(bw_req / d, d * T, channel_send_rate)` - +In a network, with transaction rate `transaction_rate` and a node with `d` degree, the traffic rates are calculated as follows: +`incoming_traffic_rate = d * transaction_rate` +`outgoing_traffic_rate = d * transaction_rate` -### Traffic Rate Analysis for the Network -Desired network transaction throughput `network_tx_throughput` in bytes/sec -is capped by the `block_size` and `block_time` as follows: -`max_network_tx_throughput = block_size / block_time` +These rates are further constrained by the channel send and receive rates, and bandwidth constraint `bw_limit` if any. +`incoming_traffic_rate = min(bw_limit / d, d * transaction_rate, SendRate)` +`outgoing_traffic_rate = min(bw_limit / d, d * transaction_rate, RecRate)` -For a node, in order to be able to handle this throughput in the worst case, -the node will undertake the following traffic: -`itr = d * max_network_tx_throughput` -`otr = d * max_network_tx_throughput` +Best case scenario: a transaction is exchanged only once, contributing to either incoming or outgoing traffic. 
+In a network, with transaction rate `transaction_rate` and a node with `d` degree, the node's traffic rate is capped by: +`traffic_rate (=incoming_traffic_rate + outgoing_traffic_rate) = d * transaction_rate` -So the minimum bandwidth requirement for a node, just due to the mempool -operation is, `d * max_network_tx_throughput` for download and upload. -If we set the bw requirement to `bw_req`, then the `nextwork_tx_throughput` -is at most `bw_req / d` bytes/sec. From 50405c94144bd7255ac7eb38c39f103337f161fe Mon Sep 17 00:00:00 2001 From: sanaz Date: Thu, 28 Sep 2023 15:28:39 -0600 Subject: [PATCH 08/20] adds impact of mempool on other network aspects --- blockchain/v1/README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/blockchain/v1/README.md b/blockchain/v1/README.md index 2e7ea8491d..427c52276c 100644 --- a/blockchain/v1/README.md +++ b/blockchain/v1/README.md @@ -84,3 +84,21 @@ In a network, with transaction rate `transaction_rate` and a node with `d` degre `traffic_rate (=incoming_traffic_rate + outgoing_traffic_rate) = d * transaction_rate` + +### Impact of mempool on other network aspects +- **Block size**: One immediate impact of mempool, is the size of mempool on the block size. Clearly, block size can not exceed the mempool size. In the current setting, the mempool size is at max `7.897 MB` meaning Celestia blocks can get as large as that (excluding block header). 
+- **Network throughput**: Desired network transaction throughput `max_network_tx_throughput` in bytes/sec
+ is capped by the `block_size` and `block_time` as follows:
+ `max_network_tx_throughput = block_size / block_time`
+
+For a node, in order to be able to handle this throughput in the worst case,
+the node will undertake the following traffic:
+`itr = d * max_network_tx_throughput`
+`otr = d * max_network_tx_throughput`
+
+So the minimum bandwidth requirement for a node, just due to the mempool
+operation is, `2* d * max_network_tx_throughput` for download and upload.
+
+If we set the bw requirement to `bw_limit`, then the `network_tx_throughput`
+is at most `bw_limit / d` bytes/sec.
+

From 1d9c11914a956ee84588c7da458dc02839368a2e Mon Sep 17 00:00:00 2001
From: sanaz
Date: Thu, 28 Sep 2023 15:43:11 -0600
Subject: [PATCH 09/20] moves the doc under the spec folder

---
 .../p2p/reactors/mempool-v1.md | 42 +++++++------------
 1 file changed, 15 insertions(+), 27 deletions(-)
 rename blockchain/v1/README.md => spec/p2p/reactors/mempool-v1.md (76%)

diff --git a/blockchain/v1/README.md b/spec/p2p/reactors/mempool-v1.md
similarity index 76%
rename from blockchain/v1/README.md
rename to spec/p2p/reactors/mempool-v1.md
index 427c52276c..fcdf942aa8 100644
--- a/blockchain/v1/README.md
+++ b/spec/p2p/reactors/mempool-v1.md
@@ -9,22 +9,22 @@ A node can receive a transaction through one of two pathways: either a user init
 1. The transaction's validity is assessed, and if it passes the validation criteria, it is added to the mempool. Furthermore, the transaction's height is set to match the current block height.
 2. **Peer Tracking**: In the event that the transaction originates from another peer, the sending peer is marked to prevent redundant transmission of the same transaction.
-Subsequently, there are two concurrent processes underway:
+   Subsequently, there are two concurrent processes underway:
 3. 
**Mempool Life-cycle**: - - Transactions that find their way into the mempool remain there until one of two conditions is met: either the mempool reaches its capacity limit or a new block is committed. + - Transactions that find their way into the mempool remain there until one of two conditions is met: either the mempool reaches its capacity limit or a new block is committed. - - When a block is committed: - - the transactions within that block are removed from the mempool. - - The remaining transactions are subjected to two checks: - - their Time-to-Live (TTL) is examined, and any transactions that have expired are promptly removed from the mempool (source: [reference](https://github.com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776/state/execution.go#L324)). - - Next, the remaining transactions are re-evaluated for validity against the updated state. Any transactions that are found to be invalid are removed from the mempool. + - When a block is committed: + - the transactions within that block are removed from the mempool. + - The remaining transactions are subjected to two checks: + - their Time-to-Live (TTL) is examined, and any transactions that have expired are promptly removed from the mempool (source: [reference](https://github.com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776/state/execution.go#L324)). + - Next, the remaining transactions are re-evaluated for validity against the updated state. Any transactions that are found to be invalid are removed from the mempool. 4. **Broadcast Process**: -For each peer and for every transaction residing in the mempool, the following actions are taken: - - A copy of the transaction is dispatched to that peer if the peer - - is online - - supports the mempool channel ID - - has a height difference of one (meaning it lags behind the transaction by a single block). If the height difference is greater, a waiting period is observed to allow the peer to catch up. 
- - **Peer Tracking**: Each transaction is sent to a peer only once, and the recipient peer is marked to prevent the retransmission of the same transaction. + For each peer and for every transaction residing in the mempool, the following actions are taken: + - A copy of the transaction is dispatched to that peer if the peer + - is online + - supports the mempool channel ID + - has a height difference of one (meaning it lags behind the transaction by a single block). If the height difference is greater, a waiting period is observed to allow the peer to catch up. + - **Peer Tracking**: Each transaction is sent to a peer only once, and the recipient peer is marked to prevent the retransmission of the same transaction. - - ## Constraints and Configurations The relevant constraints and configurations for the mempool are as follows ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L758)): - `Size`: This parameter specifies the total number of transactions that the mempool can hold, with a maximum value of `5000`. From 432788873e59295c55691c395d4d831913a932da Mon Sep 17 00:00:00 2001 From: sanaz Date: Thu, 28 Sep 2023 15:49:25 -0600 Subject: [PATCH 11/20] removes duplicates --- spec/p2p/reactors/mempool-v1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 289efb9127..cae01fd5b3 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -16,7 +16,7 @@ A node can receive a transaction through one of two pathways: either a user init - When a block is committed: - the transactions within that block are removed from the mempool. 
- The remaining transactions are subjected to two checks: - - their Time-to-Live (TTL) is examined, and any transactions that have expired are promptly removed from the mempool (source: [reference](https://github.com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776/state/execution.go#L324)). + - their Time-to-Live (TTL) is examined, and any transactions that have expired are promptly removed from the mempool ([reference](https://github.com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776/state/execution.go#L324)). - Next, the remaining transactions are re-evaluated for validity against the updated state. Any transactions that are found to be invalid are removed from the mempool. 4. **Broadcast Process**: For each peer and for every transaction residing in the mempool, the following actions are taken: From 4ac0481eb45a7de3e316083b1a3b0c3bc1fb3b6d Mon Sep 17 00:00:00 2001 From: sanaz Date: Thu, 28 Sep 2023 15:51:51 -0600 Subject: [PATCH 12/20] adds a link to the mempool channel ID --- spec/p2p/reactors/mempool-v1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index cae01fd5b3..62e930dffb 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -2,7 +2,7 @@ The Celestia-core's p2p layer, which is a fork of CometBFT, consists of channels and reactors. Peers establish connections within specific channels, effectively forming peer-to-peer groups (each channel represents such a group). The transmission of messages within a channel is governed by the associated reactor, essentially containing the protocol rules for that channel. -One notable channel is the mempool channel, identified as `MempoolChannel` with a specific channel ID of `0x30`. The mempool reactor manages the dissemination of transactions across the network. 
It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the mempool reactor and its protocol, including an analysis of the mempool protocol's traffic rate. +One notable channel is the mempool channel, identified as [`MempoolChannel`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14) with a specific channel ID of `0x30`. The mempool reactor manages the dissemination of transactions across the network. It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the mempool reactor and its protocol, including an analysis of the mempool protocol's traffic rate. ## Mempool Reactor A node can receive a transaction through one of two pathways: either a user initiates the transaction directly to the node, or the node acquires a transaction from another peer. 
Upon receiving a transaction, the following steps occur: From d949ccaa5720760fc195eb7342d77dd2d3bf52ae Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 09:52:08 -0700 Subject: [PATCH 13/20] updates p2p send and rec rate limit --- spec/p2p/reactors/mempool-v1.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 62e930dffb..769b6af931 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -35,8 +35,8 @@ The relevant constraints and configurations for the mempool are as follows ([ref For each connection, the following limits apply per channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): -- `SendRate`: The `SendRate` parameter enforces a default sending rate of `500KB/s`. It ensures that data is sent at this maximum rate. -- `RecvRate`: The `RecvRate` parameter enforces a default receiving rate of `500KB/s`. It ensures that data is received at this maximum rate. +- `SendRate`: The `SendRate` parameter enforces a default average sending rate of `5120000 B= 5MB/s`. It ensures that data is sent at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. +- `RecvRate`: The `RecvRate` parameter enforces a default average receiving rate of `5120000 B= 5MB/s`. It ensures that data is received at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. - `MaxPacketMsgPayloadSize`: The `MaxPacketMsgPayloadSize` parameter sets the maximum payload size for packet messages to `1024` bytes. 
From 4e65b6ca1b2eff6f2898252a594aa2099e0e8a67 Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 09:53:17 -0700 Subject: [PATCH 14/20] links to the channel ID byte value in the src code --- spec/p2p/reactors/mempool-v1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 769b6af931..5dff8953f8 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -2,7 +2,7 @@ The Celestia-core's p2p layer, which is a fork of CometBFT, consists of channels and reactors. Peers establish connections within specific channels, effectively forming peer-to-peer groups (each channel represents such a group). The transmission of messages within a channel is governed by the associated reactor, essentially containing the protocol rules for that channel. -One notable channel is the mempool channel, identified as [`MempoolChannel`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14) with a specific channel ID of `0x30`. The mempool reactor manages the dissemination of transactions across the network. It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the mempool reactor and its protocol, including an analysis of the mempool protocol's traffic rate. +One notable channel is the mempool channel, identified as [`MempoolChannel`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14) with a specific channel ID of [`0x30`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14). The mempool reactor manages the dissemination of transactions across the network. 
It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the mempool reactor and its protocol, including an analysis of the mempool protocol's traffic rate. ## Mempool Reactor A node can receive a transaction through one of two pathways: either a user initiates the transaction directly to the node, or the node acquires a transaction from another peer. Upon receiving a transaction, the following steps occur: From 87678af4263ce378ceb877d3c08696b58ab52c3a Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 09:56:04 -0700 Subject: [PATCH 15/20] includes link to the send and rec rate --- spec/p2p/reactors/mempool-v1.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 5dff8953f8..395374afcb 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -35,8 +35,8 @@ The relevant constraints and configurations for the mempool are as follows ([ref For each connection, the following limits apply per channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): -- `SendRate`: The `SendRate` parameter enforces a default average sending rate of `5120000 B= 5MB/s`. It ensures that data is sent at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. -- `RecvRate`: The `RecvRate` parameter enforces a default average receiving rate of `5120000 B= 5MB/s`. It ensures that data is received at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. 
+- `SendRate`: The `SendRate` parameter enforces a default average sending rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L615). It ensures that data is sent at this maximum rate. This parameter does not seem to be overwritten by the celestia-app.
+- `RecvRate`: The `RecvRate` parameter enforces a default average receiving rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L616). It ensures that data is received at this maximum rate. This parameter does not seem to be overwritten by the celestia-app.
 - `MaxPacketMsgPayloadSize`: The `MaxPacketMsgPayloadSize` parameter sets the maximum payload size for packet messages to `1024` bytes.

@@ -65,8 +65,8 @@ In a network, with transaction rate `transaction_rate` and a node with `d` degre
 `incoming_traffic_rate = d * transaction_rate`
 `outgoing_traffic_rate = d * transaction_rate`

 These rates are further constrained by the channel send and receive rates, and bandwidth constraint `bw_limit` if any.
-`incoming_traffic_rate = min(bw_limit / d, d * transaction_rate, SendRate)`
-`outgoing_traffic_rate = min(bw_limit / d, d * transaction_rate, RecRate)`
+`incoming_traffic_rate = min(d * transaction_rate, RecvRate)`
+`outgoing_traffic_rate = min(d * transaction_rate, SendRate)`

 Best case scenario: a transaction is exchanged only once, contributing to either incoming or outgoing traffic.
In a network, with transaction rate `transaction_rate` and a node with `d` degree, the node's traffic rate is capped by: From 49af729e1c2aeb45b846bc8eed888117037ee378 Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 10:09:22 -0700 Subject: [PATCH 16/20] includes some conclusion and sumaries --- spec/p2p/reactors/mempool-v1.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 395374afcb..4993820651 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -39,7 +39,7 @@ For each connection, the following limits apply per channel ID ([ref](https://gi - `RecvRate`: The `RecvRate` parameter enforces a default average receiving rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L616). It ensures that data is received at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. - `MaxPacketMsgPayloadSize`: The `MaxPacketMsgPayloadSize` parameter sets the maximum payload size for packet messages to `1024` bytes. - + Peer related configs ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L524)) that would be relevant to the traffic analysis are as follows: - `max_num_inbound_peers` and `max_num_outbound_peers`: These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers (excluding persistent peers). @@ -64,7 +64,7 @@ In a network, with transaction rate `transaction_rate` and a node with `d` degre `incoming_traffic_rate = d * transaction_rate` `outgoing_traffic_rate = d * transaction_rate` -These rates are further constrained by the channel send and receive rates, and bandwidth constraint `bw_limit` if any. 
+These rates are further constrained by the channel send and receive rates. `incoming_traffic_rate = min(d * transaction_rate, SendRate)` `outgoing_traffic_rate = min(d * transaction_rate, RecRate)` @@ -73,9 +73,16 @@ In a network, with transaction rate `transaction_rate` and a node with `d` degre `traffic_rate (=incoming_traffic_rate + outgoing_traffic_rate) = d * transaction_rate` +This yields the following conclusions (to be extended and verified): +- With a known given transaction rate `transaction_rate`, a node's (in + out) traffic rate should range from `d * transaction_rate` to `2 * d * transaction_rate`. +- To handle a particular `transaction_rate` (network throughput), the node's `SendRate` and `RecRate` with `d` connections should be at least `d * transaction_rate` to handle the worst case scenario (this is only to undertake the load incurred by the mempool reactor). + + + + ### Impact of mempool on other network aspects -- **Block size**: One immediate impact of mempool, is the size of mempool on the block size. Clearly, block size can not exceed the mempool size. In the current setting, the mempool size is at max `7.897 MB` meaning Celestia blocks can get as large as that (excluding block header). +- **Block size**: (this to be verified) One immediate impact of mempool, is the size of mempool on the block size. Clearly, block size can not exceed the mempool size. In the current setting, the mempool size is at max `7.897 MB` meaning Celestia blocks can get as large as that (excluding block header). 
- **Network throughput**: TBC - **Block Time**: TBC From 00bd501caa2f1b8b26abbe2d26c0fb71ba59ca67 Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 12:23:25 -0700 Subject: [PATCH 17/20] makes some modifications and corrections --- spec/p2p/reactors/mempool-v1.md | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 4993820651..2dd92d84d0 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -29,9 +29,9 @@ A node can receive a transaction through one of two pathways: either a user init ## Constraints and Configurations The relevant constraints and configurations for the mempool are as follows ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L758)): - `Size`: This parameter specifies the total number of transactions that the mempool can hold, with a maximum value of `5000`. -- `MaxTxBytes`: The `MaxTxBytes` parameter defines the maximum size of the mempool in bytes, with a limit of `1GB` by default consensus configs but is later modified on the celestia-app side to `128 * 128 * 482 = 7897088 = 7897.088 KB = 7.897 MB`. +- `MaxTxsBytes`: The `MaxTxsBytes` parameter defines the maximum size of the mempool in bytes, with a limit of `1GB` by default. - `TTLNumBlocks` and `TTLDuration` : These settings determine the number of blocks and time after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, however, on [celestia-app side](https://github.com/celestiaorg/celestia-app/blob/0d70807442ba0545058d353b44f6f9a583d3e11d/app/default_overrides.go#L209) these values are over-written to `5` and `5*15 s`, respectively. -- `MaxTxSize`: The `MaxTxSize` parameter specifies the maximum size of an individual transaction, which is set to `1MB`. 
+- `MaxTxBytes`: The `MaxTxBytes` parameter specifies the maximum size of an individual transaction, which is set to `1MB`.

 For each connection, the following limits apply per channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)):

@@ -75,14 +75,6 @@ In a network, with transaction rate `transaction_rate` and a node with `d` degre

 This yields the following conclusions (to be extended and verified):
 - With a known given transaction rate `transaction_rate`, a node's (in + out) traffic rate should range from `d * transaction_rate` to `2 * d * transaction_rate`.
-- To handle a particular `transaction_rate` (network throughput), the node's `SendRate` and `RecRate` with `d` connections should be at least `d * transaction_rate` to handle the worst case scenario (this is only to undertake the load incurred by the mempool reactor).
+- To handle a particular `transaction_rate` (network throughput), the node's `SendRate` and `RecvRate` should be at least `transaction_rate` to handle the worst case scenario (this is only to undertake the load incurred by the mempool reactor).
-
-
-
-### Impact of mempool on other network aspects
-- **Block size**: (this to be verified) One immediate impact of mempool, is the size of mempool on the block size. Clearly, block size can not exceed the mempool size. In the current setting, the mempool size is at max `7.897 MB` meaning Celestia blocks can get as large as that (excluding block header).
-- **Network throughput**: TBC -- **Block Time**: TBC - From 822fb6831bf08ccfe86590330d77749bfa0048de Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 13:16:21 -0700 Subject: [PATCH 18/20] includes links to the code and revises a few parts --- spec/p2p/reactors/mempool-v1.md | 51 ++++++++++++++++----------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 2dd92d84d0..7badfb9e30 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -2,9 +2,9 @@ The Celestia-core's p2p layer, which is a fork of CometBFT, consists of channels and reactors. Peers establish connections within specific channels, effectively forming peer-to-peer groups (each channel represents such a group). The transmission of messages within a channel is governed by the associated reactor, essentially containing the protocol rules for that channel. -One notable channel is the mempool channel, identified as [`MempoolChannel`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14) with a specific channel ID of [`0x30`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14). The mempool reactor manages the dissemination of transactions across the network. It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the mempool reactor and its protocol, including an analysis of the mempool protocol's traffic rate. 
+One notable channel is the mempool channel, identified as [`MempoolChannel`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14) with a specific channel ID of [`0x30`](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/mempool/mempool.go#L14). The mempool reactor manages the dissemination of transactions across the network. It's important to highlight that there's a direct correspondence between reactors and the channels they are connected to. Consequently, the mempool reactor is the exclusive reactor linked to the mempool channel with ID `0x30`. This document will provide an in-depth overview of the protocol implemented by the mempool v1 reactor. At the end, we will provide a traffic analysis of the said reactor as well. -## Mempool Reactor +## Mempool V1 Reactor A node can receive a transaction through one of two pathways: either a user initiates the transaction directly to the node, or the node acquires a transaction from another peer. Upon receiving a transaction, the following steps occur: 1. The transaction's validity is assessed, and if it passes the validation criteria, it is added to the mempool. Furthermore, the transaction's height is set to match the current block height. @@ -13,39 +13,39 @@ A node can receive a transaction through one of two pathways: either a user init 3. **Mempool Life-cycle**: - Transactions that find their way into the mempool remain there until one of two conditions is met: either the mempool reaches its capacity limit or a new block is committed. - - When a block is committed: - - the transactions within that block are removed from the mempool. 
+    - When a [block is committed](https://github.com/celestiaorg/celestia-core/blob/367caa33ef5ab618ea357189e88044dbdbd17776/state/execution.go#L324):
+      - the transactions within that block that are successfully delivered to the app are removed from the mempool ([ref](https://github.com/celestiaorg/celestia-core/blob/993c1228977f206c80cb0f87ac1d4f002826e904/mempool/v1/mempool.go#L418)). They are also placed in the mempool cache ([ref](https://github.com/celestiaorg/celestia-core/blob/993c1228977f206c80cb0f87ac1d4f002826e904/mempool/v1/mempool.go#L411-L412)).
     - The remaining transactions are subjected to two checks:
+      - their Time-to-Live (TTL) is examined ([ref](https://github.com/celestiaorg/celestia-core/blob/993c1228977f206c80cb0f87ac1d4f002826e904/mempool/v1/mempool.go#L421)), and any transactions that have expired are promptly removed from the mempool ([ref](https://github.com/celestiaorg/celestia-core/blob/993c1228977f206c80cb0f87ac1d4f002826e904/mempool/v1/mempool.go#L743)).
+      - Next, the remaining transactions are re-evaluated for validity against the updated state ([ref](https://github.com/celestiaorg/celestia-core/blob/993c1228977f206c80cb0f87ac1d4f002826e904/mempool/v1/mempool.go#L429-L430)) due to the mempool [`recheck` config](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L708) that is set to `true` ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L761)).
Any transactions that are found to be invalid are removed from the mempool. 4. **Broadcast Process**: - For each peer and for every transaction residing in the mempool, the following actions are taken: + For each peer and for every transaction residing in the mempool, the following actions are taken ([ref](https://github.com/celestiaorg/celestia-core/blob/64cd9ab7c67c945d755fb4fbd5afb2d352874eea/mempool/v1/reactor.go#L244)): - A copy of the transaction is dispatched to that peer if the peer - is online - - supports the mempool channel ID - - has a height difference of one (meaning it lags behind the transaction by a single block). If the height difference is greater, a waiting period is observed to allow the peer to catch up. - - **Peer Tracking**: Each transaction is sent to a peer only once, and the recipient peer is marked to prevent the retransmission of the same transaction. + - supports the mempool channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/ad660fee8f186d6f7e5e567ea23ea813f5038d90/p2p/peer.go#L319)) + - has a height difference of one (meaning it lags behind the transaction by a single block). If the height difference is greater, a waiting period is observed to allow the peer to catch up ([ref](https://github.com/celestiaorg/celestia-core/blob/64cd9ab7c67c945d755fb4fbd5afb2d352874eea/mempool/v1/reactor.go#L286-L289)). + - **Peer Tracking**: Each transaction is sent to a peer only once, and the recipient peer is marked to prevent the retransmission of the same transaction ([ref](https://github.com/celestiaorg/celestia-core/blob/64cd9ab7c67c945d755fb4fbd5afb2d352874eea/mempool/v1/reactor.go#L304)). 
## Constraints and Configurations The relevant constraints and configurations for the mempool are as follows ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L758)): - `Size`: This parameter specifies the total number of transactions that the mempool can hold, with a maximum value of `5000`. - `MaxTxsBytes`: The `MaxTxsBytes` parameter defines the maximum size of the mempool in bytes, with a limit of `1GB` by default. -- `TTLNumBlocks` and `TTLDuration` : These settings determine the number of blocks and time after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, however, on [celestia-app side](https://github.com/celestiaorg/celestia-app/blob/0d70807442ba0545058d353b44f6f9a583d3e11d/app/default_overrides.go#L209) these values are over-written to `5` and `5*15 s`, respectively. -- `MaxTxBytes`: The `MaxTxBytes` parameter specifies the maximum size of an individual transaction, which is set to `1MB`. +- `MaxTxBytes`: The `MaxTxBytes` parameter specifies the maximum size of an individual transaction, which is set to `1MB`. +- `TTLNumBlocks` and `TTLDuration` : These settings determine the number of blocks and time after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, however, on [celestia-app side](https://github.com/celestiaorg/celestia-app/blob/ccfb3e5e87d05d75a92ad85ab199d4f0c4879a0a/app/default_overrides.go#L221-L222) these values are over-written to `5` and `5*15 s`, respectively. 
-For each connection, the following limits apply per channel ID ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): +For each connection, the following limits apply (on the aggregate traffic rate of all the channels) ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): - `SendRate`: The `SendRate` parameter enforces a default average sending rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L615). It ensures that data is sent at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. - `RecvRate`: The `RecvRate` parameter enforces a default average receiving rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L616). It ensures that data is received at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. - `MaxPacketMsgPayloadSize`: The `MaxPacketMsgPayloadSize` parameter sets the maximum payload size for packet messages to `1024` bytes. - -Peer related configs ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L524)) that would be relevant to the traffic analysis are as follows: -- `max_num_inbound_peers` and `max_num_outbound_peers`: These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers (excluding persistent peers). 
- +P2P configs ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L524)) that would be relevant to the traffic analysis are as follows: +- `max_num_inbound_peers` and `max_num_outbound_peers`: These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers ([excluding persistent peers](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L553-L554)). + + ## Traffic Rate Analysis In the analysis provided below, we consider the knowledge of the following network parameters - `d`: Node degree (total incoming and outgoing connections) - `transaction_rate` which specifies the total size of transactions in bytes per second submitted to the network. - `C`: total number of connections in the network -Transactions are assumed to comply with the transaction size, are valid and are accepted by the mempool. -We also assume all the peers are up and running. +Transactions are assumed to comply with the transaction size, are valid and are accepted by the mempool. We also assume all the peers are up and running. ### Traffic Rate Analysis for a Node We distinguish between the incoming and outgoing traffic rate, and denote them by `incoming_traffic_rate` and `outgoing_traffic_rate`, respectively. -Worst case scenario: a transaction is exchanged by the two ends of +- **Worst case scenario**: a transaction is exchanged by the two ends of connection simultaneously, contributing to both incoming and outgoing traffic.
In a network, with transaction rate `transaction_rate` and a node with `d` degree, the traffic rates are calculated as follows: `incoming_traffic_rate = d * transaction_rate` `outgoing_traffic_rate = d * transaction_rate` -These rates are further constrained by the channel send and receive rates. -`incoming_traffic_rate = min(d * transaction_rate, SendRate)` -`outgoing_traffic_rate = min(d * transaction_rate, RecRate)` +These max rates are further constrained by the `SendRate` and `RecRate`. +`incoming_traffic_rate = d * min(transaction_rate, SendRate)` +`outgoing_traffic_rate = d * min(transaction_rate, RecRate)` -Best case scenario: a transaction is exchanged only once, contributing to either incoming or outgoing traffic. -In a network, with transaction rate `transaction_rate` and a node with `d` degree, the node's traffic rate is capped by: +- **Best case scenario**: a transaction is exchanged only once, contributing to either incoming or outgoing traffic. This is because both ends of the connection keep track of the transactions they have seen on a connection (wither vua sending or receiving). If one peer sends a transaction before the other, they both mark it as sent/received, ensuring they do not redundantly transmit it to each other. +In a network, with transaction rate `transaction_rate` and a node with degree of `d`, the node's traffic rate in best case would be: `traffic_rate (=incoming_traffic_rate + outgoing_traffic_rate) = d * transaction_rate` -This yields the following conclusions (to be extended and verified): +We can draw the following conclusions (to be extended and verified): - With a known given transaction rate `transaction_rate`, a node's (in + out) traffic rate should range from `d * transaction_rate` to `2 * d * transaction_rate`. 
- To handle a particular `transaction_rate` (network throughput), the node's `SendRate` and `RecRate` should be at least `transaction_rate` to handle the worst case scenario (this is only to undertake the load incurred by the mempool reactor). From 0157e81d0a5de8a8d0436a70723f5fd9c9526f84 Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 13:22:40 -0700 Subject: [PATCH 19/20] adds more links --- spec/p2p/reactors/mempool-v1.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index 7badfb9e30..df896bd8b7 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -33,7 +33,7 @@ The relevant constraints and configurations for the mempool are as follows ([ref - `MaxTxBytes`: The `MaxTxBytes` parameter specifies the maximum size of an individual transaction, which is set to `1MB`. - `TTLNumBlocks` and `TTLDuration` : These settings determine the number of blocks and time after which a transaction is removed from the mempool if it has not been included in a block. The default is set to zero, however, on [celestia-app side](https://github.com/celestiaorg/celestia-app/blob/ccfb3e5e87d05d75a92ad85ab199d4f0c4879a0a/app/default_overrides.go#L221-L222) these values are over-written to `5` and `5*15 s`, respectively. 
-For each connection, the following limits apply (on the aggregate traffic rate of all the channels) ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): +For each peer to peer connection, the following limits apply (on the aggregate traffic rate of all the channels) ([ref](https://github.com/celestiaorg/celestia-core/blob/3f3b7cc57f5cfc5e846ce781a9a407920e54fb72/libs/flowrate/flowrate.go#L177)): - `SendRate`: The `SendRate` parameter enforces a default average sending rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L615). It ensures that data is sent at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. - `RecvRate`: The `RecvRate` parameter enforces a default average receiving rate of [`5120000 B= 5MB/s`](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L616). It ensures that data is received at this maximum rate. This parameter does not seem to be overwritten by the celestia-app. @@ -43,8 +43,8 @@ For each connection, the following limits apply (on the aggregate traffic rate o P2P configs ([ref](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L524)) that would be relevant to the traffic analysis are as follows: -- `max_num_inbound_peers` and `max_num_outbound_peers`: These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers ([excluding persistent peers](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L553-L554)). 
- +- [`max_num_inbound_peers` and `max_num_outbound_peers`](https://github.com/celestiaorg/celestia-core/blob/37f950717381e8d8f6393437624652693e4775b8/config/config.go#L604-L605): These parameters indicate the total number of inbound and outbound peers, respectively. The default values are `40` for inbound peers and `10` for outbound peers ([excluding persistent peers](https://github.com/celestiaorg/celestia-core/blob/2f93fc823f17c36c7090f84694880c85d3244764/config/config.go#L553-L554)). + ## Traffic Rate Analysis In the analysis provided below, we consider the knowledge of the following network parameters From e28c33dfd9189e04eaf4d49e92504be175bfcd73 Mon Sep 17 00:00:00 2001 From: sanaz Date: Tue, 3 Oct 2023 15:46:04 -0700 Subject: [PATCH 20/20] fixes a typo --- spec/p2p/reactors/mempool-v1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/p2p/reactors/mempool-v1.md b/spec/p2p/reactors/mempool-v1.md index df896bd8b7..54b2148b98 100644 --- a/spec/p2p/reactors/mempool-v1.md +++ b/spec/p2p/reactors/mempool-v1.md @@ -67,7 +67,7 @@ These max rates are further constrained by the `SendRate` and `RecRate`. `incoming_traffic_rate = d * min(transaction_rate, SendRate)` `outgoing_traffic_rate = d * min(transaction_rate, RecRate)` -- **Best case scenario**: a transaction is exchanged only once, contributing to either incoming or outgoing traffic. This is because both ends of the connection keep track of the transactions they have seen on a connection (wither vua sending or receiving). If one peer sends a transaction before the other, they both mark it as sent/received, ensuring they do not redundantly transmit it to each other. +- **Best case scenario**: a transaction is exchanged only once, contributing to either incoming or outgoing traffic. This is because both ends of the connection keep track of the transactions they have seen on a connection (whether via sending or receiving). 
If one peer sends a transaction before the other, they both mark it as sent/received, ensuring they do not redundantly transmit it to each other. In a network, with transaction rate `transaction_rate` and a node with degree of `d`, the node's traffic rate in best case would be: `traffic_rate (=incoming_traffic_rate + outgoing_traffic_rate) = d * transaction_rate`