implement active enclave upgrade (#2167)
tudor-malene authored Nov 28, 2024
1 parent 3b59d78 commit dc8945c
Showing 6 changed files with 87 additions and 1,105 deletions.
1 change: 0 additions & 1 deletion go.mod
@@ -93,7 +93,6 @@ require (
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ecies/go/v2 v2.0.9 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
1,017 changes: 0 additions & 1,017 deletions go.sum

Large diffs are not rendered by default.

131 changes: 57 additions & 74 deletions go/enclave/enclave_admin_service.go
@@ -68,74 +68,35 @@ func NewEnclaveAdminService(config *enclaveconfig.EnclaveConfig, storage storage
}
}
sharedSecretProcessor := components.NewSharedSecretProcessor(mgmtContractLib, attestationProvider, enclaveKeyService.EnclaveID(), storage, logger)

dataEncryptionService := crypto.NewDataEncryptionService(logger)
dataCompressionService := compression.NewBrotliDataCompressionService()
rProducer := components.NewRollupProducer(enclaveKeyService.EnclaveID(), storage, registry, logger)
rollupCompression := components.NewRollupCompression(registry, batchExecutor, dataEncryptionService, dataCompressionService, storage, gethEncodingService, chainConfig, logger)
sigVerifier, err := components.NewSignatureValidator(storage)
if err != nil {
logger.Crit("Could not initialise the signature validator", log.ErrKey, err)
}

dataEncryptionService := crypto.NewDataEncryptionService(logger)
dataCompressionService := compression.NewBrotliDataCompressionService()

rollupCompression := components.NewRollupCompression(registry, batchExecutor, dataEncryptionService, dataCompressionService, storage, gethEncodingService, chainConfig, logger)
rollupProducer := components.NewRollupProducer(enclaveKeyService.EnclaveID(), storage, registry, logger)
rollupConsumer := components.NewRollupConsumer(mgmtContractLib, registry, rollupCompression, storage, logger, sigVerifier)

sequencerService := nodetype.NewSequencer(
blockProcessor,
batchExecutor,
registry,
rProducer,
rollupCompression,
gethEncodingService,
logger,
chainConfig,
enclaveKeyService,
mempool,
storage,
dataEncryptionService,
dataCompressionService,
nodetype.SequencerSettings{
MaxBatchSize: config.MaxBatchSize,
MaxRollupSize: config.MaxRollupSize,
GasPaymentAddress: config.GasPaymentAddress,
BatchGasLimit: config.GasBatchExecutionLimit,
BaseFee: config.BaseFee,
},
)

validatorService := nodetype.NewValidator(
blockProcessor,
batchExecutor,
registry,
chainConfig,
storage,
sigVerifier,
mempool,
logger,
)

var service nodetype.NodeType = validatorService
if config.NodeType == common.ActiveSequencer {
mempool.SetValidateMode(false)
// Todo - this is temporary - until the host calls `AddSequencer`
err := storage.StoreNewEnclave(context.Background(), enclaveKeyService.EnclaveID(), enclaveKeyService.PublicKey())
if err != nil {
logger.Crit("Failed to store enclave key", log.ErrKey, err)
return nil
}
err = storage.StoreNodeType(context.Background(), enclaveKeyService.EnclaveID(), common.ActiveSequencer)
if err != nil {
logger.Crit("Failed to store node type", log.ErrKey, err)
return nil
}
service = sequencerService
seqSettings := nodetype.SequencerSettings{
MaxBatchSize: config.MaxBatchSize,
MaxRollupSize: config.MaxRollupSize,
GasPaymentAddress: config.GasPaymentAddress,
BatchGasLimit: config.GasBatchExecutionLimit,
BaseFee: config.BaseFee,
}

return &enclaveAdminService{
sequencerService := nodetype.NewSequencer(blockProcessor, batchExecutor, registry, rollupProducer, rollupCompression, gethEncodingService, logger, chainConfig, enclaveKeyService, mempool, storage, dataEncryptionService, dataCompressionService, seqSettings)
validatorService := nodetype.NewValidator(blockProcessor, batchExecutor, registry, chainConfig, storage, sigVerifier, mempool, logger)

eas := &enclaveAdminService{
config: config,
mainMutex: sync.Mutex{},
logger: logger,
l1BlockProcessor: blockProcessor,
service: service,
service: validatorService,
sequencerService: sequencerService,
validatorService: validatorService,
sharedSecretProcessor: sharedSecretProcessor,
@@ -151,44 +112,66 @@ func NewEnclaveAdminService(config *enclaveconfig.EnclaveConfig, storage storage
enclaveKeyService: enclaveKeyService,
mempool: mempool,
}

// if the current enclave was already marked as an active/backup sequencer, it needs to set the right mempool mode
if eas.isBackupSequencer(context.Background()) || eas.isActiveSequencer(context.Background()) {
mempool.SetValidateMode(false)
}
if eas.isActiveSequencer(context.Background()) {
eas.service = sequencerService
}

// Todo - this is temporary - until the host calls `AddSequencer` instead of relying on the config - which can be removed
if config.NodeType == common.ActiveSequencer {
err := storage.StoreNewEnclave(context.Background(), enclaveKeyService.EnclaveID(), enclaveKeyService.PublicKey())
if err != nil {
logger.Crit("Failed to store enclave key", log.ErrKey, err)
return nil
}
err = eas.MakeActive()
if err != nil {
logger.Crit("Failed to create sequencer", log.ErrKey, err)
}
}

return eas
}

func (e *enclaveAdminService) AddSequencer(id common.EnclaveID, proof types.Receipt) common.SystemError {
e.mainMutex.Lock()
defer e.mainMutex.Unlock()

// by default all enclaves start their life as a validator
// todo - use the proof

// store in the database the enclave id
err := e.storage.StoreNodeType(context.Background(), id, common.BackupSequencer)
if err != nil {
return responses.ToInternalError(err)
}

// compare the id with the current enclaveId and if they match - do something so that the current enclave behaves as a "backup sequencer"
// the host will specifically mark the active enclave
if e.enclaveKeyService.EnclaveID() == id {
e.mempool.SetValidateMode(false)
}

// todo - use the proof
return nil
}

func (e *enclaveAdminService) MakeActive() common.SystemError {
e.mainMutex.Lock()
defer e.mainMutex.Unlock()

if !e.isBackupSequencer(context.Background()) {
return fmt.Errorf("only backup sequencer can become active")
}
// todo
// change the node type service
// do something with the mempool
// make some other checks?
// Once we've got the sequencer Enclave IDs permission list monitoring we should include that check here probably.
// We could even make it so that all sequencer enclaves start as backup and it can't be activated until the permissioning is done?
// todo - uncomment once AddSequencer is called by the host
//if !e.isBackupSequencer(context.Background()) {
// return fmt.Errorf("only backup sequencer can become active")
//}

// todo - remove because this enclave should already be a backup sequencer
e.mempool.SetValidateMode(false)

err := e.storage.StoreNodeType(context.Background(), e.enclaveKeyService.EnclaveID(), common.ActiveSequencer)
if err != nil {
return err
}
e.service = e.sequencerService
return nil
}

@@ -558,12 +541,12 @@ func (e *enclaveAdminService) isValidator(ctx context.Context) bool { //nolint:u

func (e *enclaveAdminService) getNodeType(ctx context.Context) common.NodeType {
id := e.enclaveKeyService.EnclaveID()
_, nodeType, err := e.storage.GetEnclavePubKey(ctx, id)
attestedEnclave, err := e.storage.GetEnclavePubKey(ctx, id)
if err != nil {
e.logger.Crit("could not read enclave pub key", log.ErrKey, err)
return 0
e.logger.Warn("could not read enclave pub key. Defaulting to validator type", log.ErrKey, err)
return common.Validator
}
return nodeType
return attestedEnclave.Type
}

func exportCrossChainData(ctx context.Context, storage storage.Storage, fromSeqNo uint64, toSeqNo uint64) (*common.ExtCrossChainBundle, error) {
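
Taken together, the constructor changes mean every enclave now starts life running the validator service and is only switched to the sequencer service through `MakeActive`. A minimal sketch of the host-driven flow this is building towards once the temporary `config.NodeType == common.ActiveSequencer` bootstrap is removed; `promoteEnclave` is a hypothetical helper, not part of this commit:

```go
// Hypothetical helper illustrating the intended activation sequence, built only
// from the methods added in this commit. Error handling is illustrative.
func promoteEnclave(eas *enclaveAdminService, id common.EnclaveID, proof types.Receipt) common.SystemError {
	// Register the enclave as a backup sequencer; when id matches the local
	// enclave, this also switches its mempool out of validate mode.
	if sysErr := eas.AddSequencer(id, proof); sysErr != nil {
		return sysErr
	}
	// Promote the backup sequencer: persist the ActiveSequencer node type and
	// swap e.service over to the sequencer service.
	return eas.MakeActive()
}
```
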
13 changes: 12 additions & 1 deletion go/enclave/storage/cache_service.go
@@ -34,6 +34,7 @@ const (
receiptCost = 1024 * 50
contractCost = 60
eventTypeCost = 120
enclaveCost = 100
)

type CacheService struct {
@@ -72,6 +73,9 @@ type CacheService struct {
// only sender can view configured
receiptCache *cache.Cache[*CachedReceipt]

// store the enclaves from the network
attestedEnclavesCache *cache.Cache[*AttestedEnclave]

logger gethlog.Logger
}

@@ -87,6 +91,8 @@ func NewCacheService(logger gethlog.Logger, testMode bool) *CacheService {
nrBatchesWithContent := 50 // ~100M
nrReceipts := 10_000 // ~1G

nrEnclaves := 20

if testMode {
nrReceipts = 500 //~50M
}
@@ -105,7 +111,8 @@ func NewCacheService(logger gethlog.Logger, testMode bool) *CacheService {
contractAddressCache: cache.New[*enclavedb.Contract](newCache(logger, nrContractAddresses, contractCost)),
eventTypeCache: cache.New[*enclavedb.EventType](newCache(logger, nrEventTypes, eventTypeCost)),

receiptCache: cache.New[*CachedReceipt](newCache(logger, nrReceipts, receiptCost)),
receiptCache: cache.New[*CachedReceipt](newCache(logger, nrReceipts, receiptCost)),
attestedEnclavesCache: cache.New[*AttestedEnclave](newCache(logger, nrEnclaves, enclaveCost)),

// cache the latest received batches to avoid a lookup when streaming it back to the host after processing
lastBatchesCache: cache.New[*core.Batch](newCache(logger, nrBatchesWithContent, batchCost)),
@@ -203,6 +210,10 @@ func (cs *CacheService) ReadConvertedHeader(ctx context.Context, batchHash commo
return getCachedValue(ctx, cs.convertedGethHeaderCache, cs.logger, batchHash, blockHeaderCost, onCacheMiss, true)
}

func (cs *CacheService) ReadEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID, onCacheMiss func(any) (*AttestedEnclave, error)) (*AttestedEnclave, error) {
return getCachedValue(ctx, cs.attestedEnclavesCache, cs.logger, enclaveId, enclaveCost, onCacheMiss, true)
}

// getCachedValue - returns the cached value for the provided key. If the key is not found, then invoke the 'onCacheMiss' function
// which returns the value, and cache it
func getCachedValue[V any](ctx context.Context, cache *cache.Cache[*V], logger gethlog.Logger, key any, cost int64, onCacheMiss func(any) (*V, error), cacheIfMissing bool) (*V, error) {
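
The new `attestedEnclavesCache` follows the same read-through pattern as the other caches: look the value up by key and, on a miss, invoke the supplied `onCacheMiss` function and store its result. A simplified, standard-library-only sketch of that pattern, under the assumption that cost accounting, eviction and TTLs are out of scope (the repository itself uses the generic `cache` package wired up above):

```go
package cachesketch

import "sync"

// readThroughCache is a toy stand-in for getCachedValue: it keeps only the
// cache-aside lookup shape, with none of the real cache's sizing or eviction.
type readThroughCache[K comparable, V any] struct {
	mu    sync.RWMutex
	items map[K]V
}

func newReadThroughCache[K comparable, V any]() *readThroughCache[K, V] {
	return &readThroughCache[K, V]{items: make(map[K]V)}
}

// Get returns the cached value for key; on a miss it calls onMiss, caches the
// result and returns it.
func (c *readThroughCache[K, V]) Get(key K, onMiss func(K) (V, error)) (V, error) {
	c.mu.RLock()
	v, ok := c.items[key]
	c.mu.RUnlock()
	if ok {
		return v, nil
	}
	v, err := onMiss(key)
	if err != nil {
		var zero V
		return zero, err
	}
	c.mu.Lock()
	c.items[key] = v
	c.mu.Unlock()
	return v, nil
}
```
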
2 changes: 1 addition & 1 deletion go/enclave/storage/interfaces.go
@@ -103,7 +103,7 @@ type TransactionStorage interface {
}

type AttestationStorage interface {
GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*ecdsa.PublicKey, common.NodeType, error)
GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*AttestedEnclave, error)
StoreNewEnclave(ctx context.Context, enclaveId common.EnclaveID, key *ecdsa.PublicKey) error
StoreNodeType(ctx context.Context, enclaveId common.EnclaveID, nodeType common.NodeType) error
}
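
Callers of `AttestationStorage` that previously destructured a `(pubKey, nodeType, error)` tuple now read both fields from the returned `*AttestedEnclave`, as `getNodeType` above does. A hypothetical call site, shown only to illustrate the migration (the function name and signature are not part of this commit):

```go
// Illustrative only: adapts the new single-struct return back into the old
// (pubKey, nodeType, error) shape for any caller that still wants the tuple.
func lookupEnclave(ctx context.Context, attestations storage.AttestationStorage, id common.EnclaveID) (*ecdsa.PublicKey, common.NodeType, error) {
	attested, err := attestations.GetEnclavePubKey(ctx, id)
	if err != nil {
		return nil, 0, fmt.Errorf("could not fetch attested enclave %s: %w", id, err)
	}
	return attested.PubKey, attested.Type, nil
}
```
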
28 changes: 17 additions & 11 deletions go/enclave/storage/storage.go
@@ -45,6 +45,11 @@ const (
enclaveKeyCfg = "ENCLAVE_KEY"
)

type AttestedEnclave struct {
PubKey *ecdsa.PublicKey
Type common.NodeType
}

// todo - this file needs splitting up based on concerns
type storageImpl struct {
db enclavedb.EnclaveDB
@@ -450,20 +455,21 @@ func (s *storageImpl) ExistsTransactionReceipt(ctx context.Context, txHash commo
return enclavedb.ExistsReceipt(ctx, s.db.GetSQLDB(), txHash)
}

// todo - cache
func (s *storageImpl) GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*ecdsa.PublicKey, common.NodeType, error) {
func (s *storageImpl) GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*AttestedEnclave, error) {
defer s.logDuration("GetEnclavePubKey", measure.NewStopwatch())
key, nodeType, err := enclavedb.FetchAttestation(ctx, s.db.GetSQLDB(), enclaveId)
if err != nil {
return nil, 0, fmt.Errorf("could not retrieve attestation key for address %s. Cause: %w", enclaveId, err)
}
return s.cachingService.ReadEnclavePubKey(ctx, enclaveId, func(a any) (*AttestedEnclave, error) {
key, nodeType, err := enclavedb.FetchAttestation(ctx, s.db.GetSQLDB(), enclaveId)
if err != nil {
return nil, fmt.Errorf("could not retrieve attestation key for address %s. Cause: %w", enclaveId, err)
}

publicKey, err := gethcrypto.DecompressPubkey(key)
if err != nil {
return nil, 0, fmt.Errorf("could not parse key from db. Cause: %w", err)
}
publicKey, err := gethcrypto.DecompressPubkey(key)
if err != nil {
return nil, fmt.Errorf("could not parse key from db. Cause: %w", err)
}

return publicKey, nodeType, nil
return &AttestedEnclave{PubKey: publicKey, Type: nodeType}, nil
})
}

func (s *storageImpl) StoreNodeType(ctx context.Context, enclaveId common.EnclaveID, nodeType common.NodeType) error {
