Commit 234a346
Merge branch 'master' into bastian/fungible-token-program-recovery
turbolent authored Jul 30, 2024
2 parents c9ad7ca + c158096 commit 234a346
Showing 163 changed files with 7,104 additions and 2,928 deletions.
17 changes: 7 additions & 10 deletions cmd/bootstrap/cmd/block.go
@@ -8,8 +8,6 @@ import (
 	"github.com/onflow/flow-go/model/dkg"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/signature"
-	"github.com/onflow/flow-go/state/protocol/inmem"
-	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
 )

// constructRootHeader constructs a header for the root block.
@@ -22,19 +20,18 @@ func constructRootHeader(rootChain string, rootParent string, rootHeight uint64,
 	return run.GenerateRootHeader(chainID, parentID, height, timestamp)
 }
 
-// constructRootBlock constructs a valid root block based on the given header, setup, and commit.
-func constructRootBlock(rootHeader *flow.Header, setup *flow.EpochSetup, commit *flow.EpochCommit) *flow.Block {
+// constructRootBlock constructs a valid root block based on the given header and protocol state ID for that block.
+func constructRootBlock(rootHeader *flow.Header, protocolStateID flow.Identifier) *flow.Block {
 	block := &flow.Block{
 		Header:  rootHeader,
 		Payload: nil,
 	}
 	block.SetPayload(flow.Payload{
-		Guarantees: nil,
-		Seals:      nil,
-		Receipts:   nil,
-		Results:    nil,
-		// TODO: shortcut in bootstrapping; we will probably have to start with a non-empty KV store in the future
-		ProtocolStateID: kvstore.NewDefaultKVStore(inmem.EpochProtocolStateFromServiceEvents(setup, commit).ID()).ID(),
+		Guarantees:      nil,
+		Seals:           nil,
+		Receipts:        nil,
+		Results:         nil,
+		ProtocolStateID: protocolStateID,
 	})
 	return block
 }
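This hunk decouples root-block construction from protocol-state creation: constructRootBlock no longer derives the KV-store ID from epoch service events, but accepts a precomputed protocol state ID (the caller side appears in the cmd/bootstrap/cmd/rootblock.go hunks further down). A minimal stand-in sketch of the new contract; the types below are simplified placeholders, not the real flow-go definitions:

package main

import "fmt"

// Simplified placeholders for flow.Identifier, flow.Header, and flow.Block.
type Identifier string

type Header struct{ ChainID string }

type Payload struct {
	// Guarantees, Seals, Receipts, Results omitted; all are nil in a root block.
	ProtocolStateID Identifier
}

type Block struct {
	Header  *Header
	Payload Payload
}

// constructRootBlock mirrors the new signature: it consumes a precomputed
// protocol state ID instead of deriving one from epoch service events.
func constructRootBlock(header *Header, protocolStateID Identifier) *Block {
	return &Block{Header: header, Payload: Payload{ProtocolStateID: protocolStateID}}
}

func main() {
	// Placeholder for rootProtocolState.ID() in the real bootstrap flow.
	id := Identifier("root-kvstore-id")
	block := constructRootBlock(&Header{ChainID: "flow-localnet"}, id)
	fmt.Println(block.Payload.ProtocolStateID)
}

Keeping payload construction free of KV-store logic lets each bootstrap command decide how the root protocol state is built, which the finalize.go and rootblock.go changes below rely on.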
15 changes: 14 additions & 1 deletion cmd/bootstrap/cmd/finalize.go
@@ -24,6 +24,7 @@ import (
 	"github.com/onflow/flow-go/module/epochs"
 	"github.com/onflow/flow-go/state/protocol/badger"
 	"github.com/onflow/flow-go/state/protocol/inmem"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
 	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
 	"github.com/onflow/flow-go/utils/io"
 )
@@ -180,7 +181,19 @@ func finalize(cmd *cobra.Command, args []string) {
 
 	// construct serializable root protocol snapshot
 	log.Info().Msg("constructing root protocol snapshot")
-	snapshot, err := inmem.SnapshotFromBootstrapStateWithParams(block, result, seal, rootQC, intermediaryData.ProtocolVersion, intermediaryData.EpochCommitSafetyThreshold, kvstore.NewDefaultKVStore)
+	snapshot, err := inmem.SnapshotFromBootstrapStateWithParams(
+		block,
+		result,
+		seal,
+		rootQC,
+		intermediaryData.ProtocolVersion,
+		func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) {
+			return kvstore.NewDefaultKVStore(
+				intermediaryData.EpochCommitSafetyThreshold,
+				intermediaryData.EpochExtensionViewCount,
+				epochStateID)
+		},
+	)
 	if err != nil {
 		log.Fatal().Err(err).Msg("unable to generate root protocol snapshot")
 	}
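The old call passed kvstore.NewDefaultKVStore directly; the new call wraps it in a closure because two of its three arguments (safety threshold and extension view count) come from the intermediary bootstrapping data, while the epoch state ID is supplied by the snapshot constructor itself. A hedged stand-in sketch of that inversion of control, under the assumption that SnapshotFromBootstrapStateWithParams derives the epoch state ID internally before invoking the constructor; all names and types here are illustrative placeholders, not the flow-go originals:

package main

import "fmt"

type Identifier [32]byte

// kvStore stands in for the protocol-state KV store; the real constructor is
// kvstore.NewDefaultKVStore(threshold, extensionViewCount, epochStateID).
type kvStore struct {
	safetyThreshold    uint64
	extensionViewCount uint64
	epochStateID       Identifier
}

func newDefaultKVStore(threshold, extensionViews uint64, epochStateID Identifier) (*kvStore, error) {
	return &kvStore{threshold, extensionViews, epochStateID}, nil
}

// snapshotFromBootstrap stands in for inmem.SnapshotFromBootstrapStateWithParams:
// it derives the epoch state ID internally, then invokes the supplied
// constructor -- which is why the caller passes a closure, not a finished store.
func snapshotFromBootstrap(makeKVStore func(Identifier) (*kvStore, error)) (*kvStore, error) {
	var epochStateID Identifier // derived from the root epoch state in the real code
	return makeKVStore(epochStateID)
}

func main() {
	threshold, extensionViews := uint64(1_000), uint64(100_000)
	kv, err := snapshotFromBootstrap(func(id Identifier) (*kvStore, error) {
		return newDefaultKVStore(threshold, extensionViews, id)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(kv.safetyThreshold, kv.extensionViewCount)
}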
1 change: 1 addition & 0 deletions cmd/bootstrap/cmd/finalize_test.go
@@ -75,6 +75,7 @@ func TestFinalize_HappyPath(t *testing.T) {
 	flagNumViewsInStakingAuction = 50_000
 	flagNumViewsInDKGPhase = 2_000
 	flagEpochCommitSafetyThreshold = 1_000
+	flagEpochExtensionViewCount = 100_000
 	flagUseDefaultEpochTargetEndTime = true
 	flagEpochTimingRefCounter = 0
 	flagEpochTimingRefTimestamp = 0
1 change: 1 addition & 0 deletions cmd/bootstrap/cmd/intermediary.go
@@ -20,6 +20,7 @@ type IntermediaryBootstrappingData struct {
 type IntermediaryParamsData struct {
 	ProtocolVersion            uint
 	EpochCommitSafetyThreshold uint64
+	EpochExtensionViewCount    uint64
 }

// IntermediaryEpochData stores the root epoch and the epoch config for the execution state
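The rootblock step produces this struct and the finalize step reads it back (see intermediaryData.EpochExtensionViewCount in the finalize.go hunk above). Assuming the intermediary data round-trips through JSON, as the WriteJSON usage elsewhere in this diff suggests, adding a field is read-compatible: decoding a file written before this change simply leaves the new field at its zero value. A small sketch of that behavior, using a local copy of the struct for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of IntermediaryParamsData, for illustration only; the real type
// lives in cmd/bootstrap/cmd/intermediary.go.
type IntermediaryParamsData struct {
	ProtocolVersion            uint
	EpochCommitSafetyThreshold uint64
	EpochExtensionViewCount    uint64
}

func main() {
	// An intermediary file written before this change has no
	// EpochExtensionViewCount key; encoding/json leaves the field at 0,
	// so downstream validation must reject a zero or too-small value.
	old := []byte(`{"ProtocolVersion":1,"EpochCommitSafetyThreshold":1000}`)
	var params IntermediaryParamsData
	if err := json.Unmarshal(old, &params); err != nil {
		panic(err)
	}
	fmt.Println(params.EpochExtensionViewCount) // 0
}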
31 changes: 24 additions & 7 deletions cmd/bootstrap/cmd/rootblock.go
@@ -18,6 +18,8 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/epochs"
 	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/inmem"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
 )

var (
@@ -27,6 +29,7 @@ var (
 	flagRootTimestamp              string
 	flagProtocolVersion            uint
 	flagEpochCommitSafetyThreshold uint64
+	flagEpochExtensionViewCount    uint64
 	flagCollectionClusters         uint
 	flagEpochCounter               uint64
 	flagNumViewsInEpoch            uint64
@@ -91,12 +94,14 @@ func addRootBlockCmdFlags() {
 	rootBlockCmd.Flags().StringVar(&flagRootTimestamp, "root-timestamp", time.Now().UTC().Format(time.RFC3339), "timestamp of the root block (RFC3339)")
 	rootBlockCmd.Flags().UintVar(&flagProtocolVersion, "protocol-version", flow.DefaultProtocolVersion, "major software version used for the duration of this spork")
 	rootBlockCmd.Flags().Uint64Var(&flagEpochCommitSafetyThreshold, "epoch-commit-safety-threshold", 500, "defines epoch commitment deadline")
+	rootBlockCmd.Flags().Uint64Var(&flagEpochExtensionViewCount, "epoch-extension-view-count", 100_000, "length of epoch extension in views, default is 100_000 which is approximately 1 day")
 
 	cmd.MarkFlagRequired(rootBlockCmd, "root-chain")
 	cmd.MarkFlagRequired(rootBlockCmd, "root-parent")
 	cmd.MarkFlagRequired(rootBlockCmd, "root-height")
 	cmd.MarkFlagRequired(rootBlockCmd, "protocol-version")
 	cmd.MarkFlagRequired(rootBlockCmd, "epoch-commit-safety-threshold")
+	cmd.MarkFlagRequired(rootBlockCmd, "epoch-extension-view-count")
 
 	// Epoch timing config - these values must be set identically to `EpochTimingConfig` in the FlowEpoch smart contract.
 	// See https://github.com/onflow/flow-core-contracts/blob/240579784e9bb8d97d91d0e3213614e25562c078/contracts/epochs/FlowEpoch.cdc#L259-L266
@@ -187,7 +192,7 @@ func rootBlock(cmd *cobra.Command, args []string) {
 	log.Info().Msg("")
 
 	log.Info().Msg("constructing root QCs for collection node clusters")
-	clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
+	clusterQCs := run.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
 	log.Info().Msg("")
 
 	log.Info().Msg("constructing root header")
@@ -204,6 +209,7 @@
 	}
 	intermediaryParamsData := IntermediaryParamsData{
 		EpochCommitSafetyThreshold: flagEpochCommitSafetyThreshold,
+		EpochExtensionViewCount:    flagEpochExtensionViewCount,
 		ProtocolVersion:            flagProtocolVersion,
 	}
 	intermediaryData := IntermediaryBootstrappingData{
@@ -218,7 +224,15 @@
 	log.Info().Msg("")
 
 	log.Info().Msg("constructing root block")
-	block := constructRootBlock(header, epochSetup, epochCommit)
+	rootProtocolState, err := kvstore.NewDefaultKVStore(
+		flagEpochCommitSafetyThreshold,
+		flagEpochExtensionViewCount,
+		inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit).ID(),
+	)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to construct root kvstore")
+	}
+	block := constructRootBlock(header, rootProtocolState.ID())
 	err = common.WriteJSON(model.PathRootBlockData, flagOutdir, block)
 	if err != nil {
 		log.Fatal().Err(err).Msg("failed to write json")
@@ -242,23 +256,26 @@ func validateEpochConfig() error {
 	dkgFinalView := flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 // 3 DKG phases
 	epochCommitDeadline := flagNumViewsInEpoch - flagEpochCommitSafetyThreshold
 
-	defaultSafetyThreshold, err := protocol.DefaultEpochCommitSafetyThreshold(chainID)
+	defaultEpochSafetyParams, err := protocol.DefaultEpochSafetyParams(chainID)
 	if err != nil {
 		return fmt.Errorf("could not get default epoch commit safety threshold: %w", err)
 	}
 
 	// sanity check: the safety threshold is >= the default for the chain
-	if flagEpochCommitSafetyThreshold < defaultSafetyThreshold {
-		return fmt.Errorf("potentially unsafe epoch config: epoch commit safety threshold smaller than expected (%d < %d)", flagEpochCommitSafetyThreshold, defaultSafetyThreshold)
+	if flagEpochCommitSafetyThreshold < defaultEpochSafetyParams.FinalizationSafetyThreshold {
+		return fmt.Errorf("potentially unsafe epoch config: epoch commit safety threshold smaller than expected (%d < %d)", flagEpochCommitSafetyThreshold, defaultEpochSafetyParams.FinalizationSafetyThreshold)
 	}
+	if flagEpochExtensionViewCount < defaultEpochSafetyParams.EpochExtensionViewCount {
+		return fmt.Errorf("potentially unsafe epoch config: epoch extension view count smaller than expected (%d < %d)", flagEpochExtensionViewCount, defaultEpochSafetyParams.EpochExtensionViewCount)
+	}
 	// sanity check: epoch commitment deadline cannot be before the DKG end
 	if epochCommitDeadline <= dkgFinalView {
 		return fmt.Errorf("invalid epoch config: the epoch commitment deadline (%d) is before the DKG final view (%d)", epochCommitDeadline, dkgFinalView)
 	}
 	// sanity check: the difference between DKG end and safety threshold is also >= the default safety threshold
-	if epochCommitDeadline-dkgFinalView < defaultSafetyThreshold {
+	if epochCommitDeadline-dkgFinalView < defaultEpochSafetyParams.FinalizationSafetyThreshold {
 		return fmt.Errorf("potentially unsafe epoch config: time between DKG end and epoch commitment deadline is smaller than expected (%d-%d < %d)",
-			epochCommitDeadline, dkgFinalView, defaultSafetyThreshold)
+			epochCommitDeadline, dkgFinalView, defaultEpochSafetyParams.FinalizationSafetyThreshold)
 	}
 	return nil
 }
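The three sanity checks in validateEpochConfig interact: it is not enough for the epoch commitment deadline to fall after the DKG final view; the gap between them must itself be at least the safety threshold. A worked example with illustrative numbers (not chain defaults):

package main

import "fmt"

func main() {
	// Illustrative values only -- not chain defaults.
	var (
		numViewsInEpoch     uint64 = 4_000
		stakingAuctionViews uint64 = 100
		dkgPhaseViews       uint64 = 1_000
		safetyThreshold     uint64 = 500
	)
	dkgFinalView := stakingAuctionViews + 3*dkgPhaseViews    // 3_100
	epochCommitDeadline := numViewsInEpoch - safetyThreshold // 3_500

	fmt.Println(epochCommitDeadline > dkgFinalView)                  // true: deadline falls after the DKG end
	fmt.Println(epochCommitDeadline-dkgFinalView >= safetyThreshold) // false: 400 < 500, config is rejected
}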
208 changes: 208 additions & 0 deletions cmd/bootstrap/run/epochs.go
@@ -0,0 +1,208 @@
package run

import (
"fmt"

"github.com/rs/zerolog"

"github.com/onflow/cadence"

"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/fvm/systemcontracts"
"github.com/onflow/flow-go/model/bootstrap"
model "github.com/onflow/flow-go/model/bootstrap"
"github.com/onflow/flow-go/model/cluster"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/model/flow/filter"
"github.com/onflow/flow-go/state/protocol/inmem"
)

// GenerateRecoverEpochTxArgs generates the required transaction arguments for the `recoverEpoch` transaction.
func GenerateRecoverEpochTxArgs(log zerolog.Logger,
internalNodePrivInfoDir string,
nodeConfigJson string,
collectionClusters int,
epochCounter uint64,
rootChainID flow.ChainID,
numViewsInStakingAuction uint64,
numViewsInEpoch uint64,
targetDuration uint64,
initNewEpoch bool,
snapshot *inmem.Snapshot,
) ([]cadence.Value, error) {
epoch := snapshot.Epochs().Current()

currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant)
if err != nil {
return nil, fmt.Errorf("failed to get valid protocol participants from snapshot: %w", err)
}
// We need canonical ordering here; sanity check to enforce this:
if !currentEpochIdentities.Sorted(flow.Canonical[flow.Identity]) {
return nil, fmt.Errorf("identies from snapshot not in canonical order")
}

// separate collector nodes by internal and partner nodes
collectors := currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
internalCollectors := make(flow.IdentityList, 0)
partnerCollectors := make(flow.IdentityList, 0)

log.Info().Msg("collecting internal node network and staking keys")
internalNodes, err := common.ReadFullInternalNodeInfos(log, internalNodePrivInfoDir, nodeConfigJson)
if err != nil {
return nil, fmt.Errorf("failed to read full internal node infos: %w", err)
}

internalNodesMap := make(map[flow.Identifier]struct{})
for _, node := range internalNodes {
if !currentEpochIdentities.Exists(node.Identity()) {
return nil, fmt.Errorf("node ID found in internal node infos missing from protocol snapshot identities %s: %w", node.NodeID, err)
}
internalNodesMap[node.NodeID] = struct{}{}
}
log.Info().Msg("")

for _, collector := range collectors {
if _, ok := internalNodesMap[collector.NodeID]; ok {
internalCollectors = append(internalCollectors, collector)
} else {
partnerCollectors = append(partnerCollectors, collector)
}
}

currentEpochDKG, err := epoch.DKG()
if err != nil {
return nil, fmt.Errorf("failed to get DKG for current epoch: %w", err)
}

log.Info().Msg("computing collection node clusters")

assignments, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, collectionClusters)
if err != nil {
log.Fatal().Err(err).Msg("unable to generate cluster assignment")
}
log.Info().Msg("")

log.Info().Msg("constructing root blocks for collection node clusters")
clusterBlocks := GenerateRootClusterBlocks(epochCounter, clusters)
log.Info().Msg("")

log.Info().Msg("constructing root QCs for collection node clusters")
clusterQCs := ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
log.Info().Msg("")

dkgPubKeys := make([]cadence.Value, 0)
nodeIds := make([]cadence.Value, 0)

// NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the consensus
// committee in the RecoveryEpoch must be identical to the committee which participated in that DKG.
dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String())
if cdcErr != nil {
log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string")
}
dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc)
for _, id := range currentEpochIdentities {
if id.GetRole() == flow.RoleConsensus {
dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID())
if keyShareErr != nil {
log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID()))
}
dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String())
if cdcErr != nil {
log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", id.GetNodeID()))
}
dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc)
}
nodeIdCdc, err := cadence.NewString(id.GetNodeID().String())
if err != nil {
log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", id.GetNodeID()))
}
nodeIds = append(nodeIds, nodeIdCdc)
}
clusterQCAddress := systemcontracts.SystemContractsForChain(rootChainID).ClusterQC.Address.String()
qcVoteData, err := common.ConvertClusterQcsCdc(clusterQCs, clusters, clusterQCAddress)
if err != nil {
log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type")
}

currEpochFinalView, err := epoch.FinalView()
if err != nil {
log.Fatal().Err(err).Msg("failed to get final view of current epoch")
}

currEpochTargetEndTime, err := epoch.TargetEndTime()
if err != nil {
log.Fatal().Err(err).Msg("failed to get target end time of current epoch")
}

args := []cadence.Value{
// epoch start view
cadence.NewUInt64(currEpochFinalView + 1),
// staking phase end view
cadence.NewUInt64(currEpochFinalView + numViewsInStakingAuction),
// epoch end view
cadence.NewUInt64(currEpochFinalView + numViewsInEpoch),
// target duration
cadence.NewUInt64(targetDuration),
// target end time
cadence.NewUInt64(currEpochTargetEndTime),
// clusters
common.ConvertClusterAssignmentsCdc(assignments),
// qcVoteData
cadence.NewArray(qcVoteData),
// dkg pub keys
cadence.NewArray(dkgPubKeys),
// node ids
cadence.NewArray(nodeIds),
// recover the network by initializing a new recover epoch which will increment the smart contract epoch counter
// or overwrite the epoch metadata for the current epoch
cadence.NewBool(initNewEpoch),
}

return args, nil
}

// ConstructRootQCsForClusters constructs a root QC for each cluster in the list.
// Args:
// - log: the logger instance.
// - clusterList: list of clusters
// - nodeInfos: list of NodeInfos (must contain all internal nodes)
// - clusterBlocks: list of root blocks (one for each cluster)
// Returns:
// - []*flow.QuorumCertificate: the constructed root QCs, one per cluster, ordered to match clusterList.
func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {
if len(clusterBlocks) != len(clusterList) {
log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
Msg("number of clusters needs to equal number of cluster blocks")
}

qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
for i, cluster := range clusterList {
signers := filterClusterSigners(cluster, nodeInfos)

qc, err := GenerateClusterRootQC(signers, cluster, clusterBlocks[i])
if err != nil {
log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
}
qcs[i] = qc
}

return qcs
}

// filterClusterSigners filters a list of nodes, returning only the nodes that will sign the QC for the
// given cluster: nodes that are in the given cluster AND are not partner nodes (i.e., we hold their
// private keys).
func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo {
var filtered []model.NodeInfo
for _, node := range nodeInfos {
_, isInCluster := cluster.ByNodeID(node.NodeID)
isPrivateKeyAvailable := node.Type() == model.NodeInfoTypePrivate

if isInCluster && isPrivateKeyAvailable {
filtered = append(filtered, node)
}
}

return filtered
}
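A hedged usage sketch of the new GenerateRecoverEpochTxArgs entry point; the paths, counts, and epoch parameters below are hypothetical placeholders, and a real caller must supply a protocol snapshot for the network being recovered:

package main

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/cmd/bootstrap/run"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol/inmem"
)

func main() {
	log := zerolog.Nop()

	var snapshot *inmem.Snapshot // placeholder: load a root protocol snapshot here
	if snapshot == nil {
		return // sketch only; a real caller must provide a snapshot
	}

	args, err := run.GenerateRecoverEpochTxArgs(
		log,
		"/bootstrap/private-root-information", // internalNodePrivInfoDir (hypothetical path)
		"/bootstrap/node-config.json",         // nodeConfigJson (hypothetical path)
		3,             // collectionClusters
		43,            // epochCounter (hypothetical)
		flow.Localnet, // rootChainID
		4_000,         // numViewsInStakingAuction
		100_000,       // numViewsInEpoch
		3_600,         // targetDuration (hypothetical)
		true,          // initNewEpoch: increment the epoch counter rather than overwrite
		snapshot,
	)
	if err != nil {
		panic(err)
	}
	_ = args // pass as arguments to the recoverEpoch governance transaction
}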