Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Guardian now produces rollup based on interval time and available bat… #1536

Merged
merged 9 commits into from
Sep 25, 2023
7 changes: 7 additions & 0 deletions go/config/host_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,9 @@ type HostInputConfig struct {

// Whether inbound p2p is disabled
IsInboundP2PDisabled bool

// MaxRollupSize specifies the threshold size at which the sequencer-host publishes a rollup
MaxRollupSize uint64
}

// ToHostConfig returns a HostConfig given a HostInputConfig
Expand Down Expand Up @@ -131,6 +134,7 @@ func (p HostInputConfig) ToHostConfig() *HostConfig {
RollupInterval: p.RollupInterval,
L1BlockTime: p.L1BlockTime,
IsInboundP2PDisabled: p.IsInboundP2PDisabled,
MaxRollupSize: p.MaxRollupSize,
}
}

Expand All @@ -156,6 +160,8 @@ type HostConfig struct {
BatchInterval time.Duration
// Min interval before creating the next rollup (only used by Sequencer nodes)
RollupInterval time.Duration
// MaxRollupSize is the max size of the rollup
MaxRollupSize uint64
// The expected time between blocks on the L1 network
L1BlockTime time.Duration

Expand Down Expand Up @@ -255,5 +261,6 @@ func DefaultHostParsedConfig() *HostInputConfig {
RollupInterval: 5 * time.Second,
L1BlockTime: 15 * time.Second,
IsInboundP2PDisabled: false,
MaxRollupSize: 1024 * 64,
otherview marked this conversation as resolved.
Show resolved Hide resolved
}
}
1 change: 0 additions & 1 deletion go/enclave/components/batch_registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import (
"github.com/obscuronet/go-obscuro/go/common"

"github.com/ethereum/go-ethereum/core/types"

"github.com/obscuronet/go-obscuro/go/enclave/storage"

"github.com/ethereum/go-ethereum/core/state"
Expand Down
3 changes: 3 additions & 0 deletions go/host/container/cli.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ type HostConfigToml struct {
RollupInterval string
IsInboundP2PDisabled bool
L1BlockTime int
MaxRollupSize int
}

// ParseConfig returns a config.HostInputConfig based on either the file identified by the `config` flag, or the flags with
Expand Down Expand Up @@ -89,6 +90,7 @@ func ParseConfig() (*config.HostInputConfig, error) {
batchInterval := flag.String(batchIntervalName, cfg.BatchInterval.String(), flagUsageMap[batchIntervalName])
rollupInterval := flag.String(rollupIntervalName, cfg.RollupInterval.String(), flagUsageMap[rollupIntervalName])
isInboundP2PDisabled := flag.Bool(isInboundP2PDisabledName, cfg.IsInboundP2PDisabled, flagUsageMap[isInboundP2PDisabledName])
maxRollupSize := flag.Uint64(maxRollupSizeFlagName, cfg.MaxRollupSize, flagUsageMap[maxRollupSizeFlagName])

flag.Parse()

Expand Down Expand Up @@ -139,6 +141,7 @@ func ParseConfig() (*config.HostInputConfig, error) {
return nil, err
}
cfg.IsInboundP2PDisabled = *isInboundP2PDisabled
cfg.MaxRollupSize = *maxRollupSize

return cfg, nil
}
Expand Down
2 changes: 2 additions & 0 deletions go/host/container/cli_flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ const (
batchIntervalName = "batchInterval"
rollupIntervalName = "rollupInterval"
isInboundP2PDisabledName = "isInboundP2PDisabled"
maxRollupSizeFlagName = "maxRollupSize"
)

// Returns a map of the flag usages.
Expand Down Expand Up @@ -72,5 +73,6 @@ func getFlagUsageMap() map[string]string {
batchIntervalName: "Duration between each batch. Can be put down as 1.0s",
rollupIntervalName: "Duration between each rollup. Can be put down as 1.0s",
isInboundP2PDisabledName: "Whether inbound p2p is enabled",
maxRollupSizeFlagName: "Max size of a rollup",
}
}
89 changes: 69 additions & 20 deletions go/host/enclave/guardian.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,9 @@ type Guardian struct {

batchInterval time.Duration
rollupInterval time.Duration
blockTime time.Duration
l1StartHash gethcommon.Hash
maxRollupSize uint64

hostInterrupter *stopcontrol.StopControl // host hostInterrupter so we can stop quickly

Expand All @@ -78,6 +80,8 @@ func NewGuardian(cfg *config.HostConfig, hostData host.Identity, serviceLocator
batchInterval: cfg.BatchInterval,
rollupInterval: cfg.RollupInterval,
l1StartHash: cfg.L1StartHash,
maxRollupSize: cfg.MaxRollupSize,
blockTime: cfg.L1BlockTime,
db: db,
hostInterrupter: interrupter,
logger: logger,
Expand Down Expand Up @@ -518,42 +522,47 @@ func (g *Guardian) periodicBatchProduction() {
func (g *Guardian) periodicRollupProduction() {
defer g.logger.Info("Stopping rollup production")

interval := g.rollupInterval
if interval == 0 {
interval = 3 * time.Second
}
rollupTicker := time.NewTicker(interval)
// attempt to produce rollup every time the timer ticks until we are stopped/interrupted
// check rollup every l1 block time
rollupCheckTicker := time.NewTicker(g.blockTime)
lastSuccessfulRollup := time.Now()

for {
if g.hostInterrupter.IsStopping() {
rollupTicker.Stop()
return // stop periodic rollup production
}
select {
case <-rollupTicker.C:
case <-rollupCheckTicker.C:
if !g.state.IsUpToDate() {
// if we're behind the L1, we don't want to produce rollups
g.logger.Debug("skipping rollup production because L1 is not up to date", "state", g.state)
continue
}
lastBatchNo, err := g.sl.L1Publisher().FetchLatestSeqNo()

fromBatch, err := g.getLatestBatchNo()
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we should have another skip here that's like:

if time.Since(lastSuccessfulRollup) < minRollupInterval {
   continue // only just produced a rollup, allow time for enclave to consume
}

where minRollupInterval can probably just be like 1*blocktime (it's mostly just incase the timer fires soon after the previous run, not sure how the timer works and if that's possible but I assume it is)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I like to have a minRollupInterval as a performance improvement, perhaps at a later stage?
Mainly due to the not being clear what the minRollupInterval should be.

Should be ok to not have this check for now because:

  • This is an awaited method. Once the rollup is created, it will wait for the receipt, which guarantees one block spacing.
  • This whole process then is called after the 1*l1blocktime duration which is AFTER the success tx mint. So after the await of the receipt (step above) it waits again another block to check if it should rollup.

Would it make sense to add a few timers/logs so that it's possible to monitor this behavior? (for future improvements?)

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could be slightly confusing when the timer fires straightaway but shouldn't be a problem, like you say probably just optimisation at worst.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah as discussed on discord the race condition I was worried about isn't an issue any more because you're using the L1 data directly to decide how far we have rolled up. Disregard this!

if err != nil {
g.logger.Warn("encountered error while trying to retrieve latest sequence number", log.ErrKey, err)
continue
}
fromBatch := lastBatchNo.Uint64()
if lastBatchNo.Uint64() > common.L2GenesisSeqNo {
fromBatch++
}
producedRollup, err := g.enclaveClient.CreateRollup(fromBatch)

availBatchesSumSize, err := g.calculateNonRolledupBatchesSize(fromBatch)
if err != nil {
g.logger.Error("unable to produce rollup", log.ErrKey, err)
} else {
g.logger.Error("unable to GetBatchesAfterSize rollup", log.ErrKey, err)
}

// produce and issue rollup when either:
// it has passed g.rollupInterval from last lastSuccessfulRollup
// or the size of accumulated batches is > g.maxRollupSize
if time.Since(lastSuccessfulRollup) > g.rollupInterval || availBatchesSumSize >= g.maxRollupSize {
producedRollup, err := g.enclaveClient.CreateRollup(fromBatch)
otherview marked this conversation as resolved.
Show resolved Hide resolved
if err != nil {
g.logger.Error("unable to create rollup", "batchSeqNo", fromBatch)
continue
}
// this method waits until the receipt is received
g.sl.L1Publisher().PublishRollup(producedRollup)
lastSuccessfulRollup = time.Now()
}

case <-g.hostInterrupter.Done():
// interrupted - end periodic process
rollupTicker.Stop()
rollupCheckTicker.Stop()
return
}
}
Expand Down Expand Up @@ -608,3 +617,43 @@ func (g *Guardian) streamEnclaveData() {
}
}
}

func (g *Guardian) calculateNonRolledupBatchesSize(seqNo uint64) (uint64, error) {
var size uint64

if seqNo == 0 { // don't calculate for seqNo 0 batches
return 0, nil
}

currentNo := seqNo
for {
batch, err := g.sl.L2Repo().FetchBatchBySeqNo(big.NewInt(int64(currentNo)))
if err != nil {
if errors.Is(err, errutil.ErrNotFound) {
break // no more batches
}
return 0, err
}

bSize, err := batch.Size()
if err != nil {
return 0, err
}
size += uint64(bSize)
otherview marked this conversation as resolved.
Show resolved Hide resolved
currentNo++
}

return size, nil
}

// getLatestBatchNo derives the batch sequence number to roll up from, based on
// the latest sequence number already published to the L1. Past genesis, this is
// the batch immediately after the last published one; at genesis it is the
// published number itself.
func (g *Guardian) getLatestBatchNo() (uint64, error) {
	latestPublished, err := g.sl.L1Publisher().FetchLatestSeqNo()
	if err != nil {
		return 0, err
	}
	seqNo := latestPublished.Uint64()
	if seqNo > common.L2GenesisSeqNo {
		seqNo++
	}
	return seqNo, nil
}
1 change: 1 addition & 0 deletions go/node/docker_node.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@ func (d *DockerNode) startHost() error {
"-p2pBindAddress", fmt.Sprintf("0.0.0.0:%d", d.cfg.hostP2PPort),
"-clientRPCPortHttp", fmt.Sprintf("%d", d.cfg.hostHTTPPort),
"-clientRPCPortWs", fmt.Sprintf("%d", d.cfg.hostWSPort),
"-maxRollupSize=65536",
// host persistence hardcoded to use /data dir within the container, this needs to be mounted
fmt.Sprintf("-useInMemoryDB=%t", d.cfg.hostInMemDB),
fmt.Sprintf("-debugNamespaceEnabled=%t", d.cfg.debugNamespaceEnabled),
Expand Down
1 change: 1 addition & 0 deletions integration/simulation/devnetwork/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,7 @@ func (n *InMemNodeOperator) createHostContainer() *hostcontainer.HostContainer {
BatchInterval: n.config.BatchInterval,
RollupInterval: n.config.RollupInterval,
L1BlockTime: n.config.L1BlockTime,
MaxRollupSize: 1024 * 64,
}

hostLogger := testlog.Logger().New(log.NodeIDKey, n.l1Wallet.Address(), log.CmpKey, log.HostCmp)
Expand Down
Loading